From 5181ddd4e8551f86e46c77f642e6ed611feb323b Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 22 Dec 2020 14:32:02 -0800 Subject: [PATCH] feat: Introduce tracks and thumbnails fields for FaceDetectionAnnotations (#90) This PR was generated using Autosynth. :rainbow: Synth log will be available here: https://source.cloud.google.com/results/invocations/2e07b4d7-dfb2-4ae0-bdea-6cc5c2642547/targets - [ ] To automatically regenerate this PR, check this box. PiperOrigin-RevId: 346664095 Source-Link: https://github.com/googleapis/googleapis/commit/468a94a87b9f80d22d1f5e3076d5bab73a5c996f PiperOrigin-RevId: 346138329 Source-Link: https://github.com/googleapis/googleapis/commit/674ec0e684b20e57f8ab4811a44ba50631d0ef8b Source-Link: https://github.com/googleapis/synthtool/commit/7fcc405a579d5d53a726ff3da1b7c8c08f0f2d58 --- .../proto/video_intelligence.proto | 906 ++++++++++++++ .../types/video_intelligence.py | 8 + .../proto/video_intelligence.proto | 408 ++++++ .../proto/video_intelligence.proto | 444 +++++++ .../proto/video_intelligence.proto | 476 +++++++ .../proto/video_intelligence.proto | 1089 +++++++++++++++++ .../google-cloud-videointelligence/noxfile.py | 2 +- .../samples/analyze/README.rst | 195 --- .../samples/analyze/noxfile.py | 34 +- .../samples/labels/README.rst | 135 -- .../samples/labels/noxfile.py | 34 +- .../samples/quickstart/noxfile.py | 34 +- .../samples/shotchange/noxfile.py | 35 +- .../fixup_videointelligence_v1_keywords.py | 179 +++ ...ixup_videointelligence_v1beta2_keywords.py | 179 +++ ...up_videointelligence_v1p1beta1_keywords.py | 179 +++ ...up_videointelligence_v1p2beta1_keywords.py | 179 +++ ...up_videointelligence_v1p3beta1_keywords.py | 180 +++ .../synth.metadata | 176 +-- 19 files changed, 4393 insertions(+), 479 deletions(-) create mode 100644 packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto create mode 100644 packages/google-cloud-videointelligence/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto create mode 100644 packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto create mode 100644 packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto create mode 100644 packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto delete mode 100644 packages/google-cloud-videointelligence/samples/analyze/README.rst delete mode 100644 packages/google-cloud-videointelligence/samples/labels/README.rst create mode 100644 packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1_keywords.py create mode 100644 packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1beta2_keywords.py create mode 100644 packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p1beta1_keywords.py create mode 100644 packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p2beta1_keywords.py create mode 100644 packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p3beta1_keywords.py diff --git a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto new file mode 100644 index 000000000000..648ec4752534 --- /dev/null +++ 
b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/proto/video_intelligence.proto @@ -0,0 +1,906 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.videointelligence.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1"; + +// Service that implements the Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported. URIs must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + // multiple videos, a video URI may include wildcards in the `object-id`. + // Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` must be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via the `input_uri`. + // If set, `input_uri` must be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. 
+ repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported. These must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + // region is specified, the region will be determined based on video file + // location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; + + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; + + // Config for TEXT_DETECTION. + TextDetectionConfig text_detection_config = 8; + + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + + // Config for OBJECT_TRACKING. + ObjectTrackingConfig object_tracking_config = 13; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection. + FACE_DETECTION = 4; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; + + // Logo detection, tracking, and recognition. + LOGO_RECOGNITION = 12; + + // Person detection. + PERSON_DETECTION = 14; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. 
+ // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e., non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; + + // The confidence threshold used to filter labels from frame-level + // detection. If not set, it is set to 0.4 by default. The valid + // range for this threshold is [0.1, 0.9]. Any value set outside of this + // range will be clipped. + // Note: For best results, use the default threshold. We update + // the default threshold every time we release a new model. + float frame_confidence_threshold = 4; + + // The confidence threshold used to filter labels from + // video-level and shot-level detections. If not set, it's set to 0.3 by + // default. The valid range for this threshold is [0.1, 0.9]. Any value set + // outside of this range will be clipped. + // Note: For best results, use the default threshold. We update + // the default threshold every time we release a new model. + float video_confidence_threshold = 5; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for OBJECT_TRACKING. +message ObjectTrackingConfig { + // Model to use for object tracking. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes are included in the face annotation output. + bool include_bounding_boxes = 2; + + // Whether to enable face attributes detection, such as glasses, dark_glasses, + // mouth_open, etc. Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 5; +} + +// Config for PERSON_DETECTION. +message PersonDetectionConfig { + // Whether bounding boxes are included in the person detection annotation + // output. + bool include_bounding_boxes = 1; + + // Whether to enable pose landmarks detection. Ignored if + // 'include_bounding_boxes' is set to false. + bool include_pose_landmarks = 2; + + // Whether to enable person attributes detection, such as clothing color (black, + // blue, etc.), type (coat, dress, etc.), pattern (plain, floral, etc.), hair, + // etc. + // Ignored if 'include_bounding_boxes' is set to false. + bool include_attributes = 3; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for TEXT_DETECTION. +message TextDetectionConfig { + // A language hint can be specified if the language to be detected is known a + // priori. It can increase the accuracy of the detection. The language hint must + // be a language code in BCP-47 format. + // + // Automatic language detection is performed if no hint is provided.
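A usage sketch of how these request and config messages compose from the Python client that ships with this change; the bucket path and timeout below are illustrative assumptions, not part of the generated sources:

```python
from google.cloud import videointelligence_v1 as vi

client = vi.VideoIntelligenceServiceClient()

# LABEL_DETECTION request with an explicit LabelDetectionConfig; confidence
# thresholds outside [0.1, 0.9] are clipped by the service, per the proto
# comments above.
request = vi.AnnotateVideoRequest(
    input_uri="gs://my-bucket/my-video.mp4",  # hypothetical input
    features=[vi.Feature.LABEL_DETECTION],
    video_context=vi.VideoContext(
        label_detection_config=vi.LabelDetectionConfig(
            label_detection_mode=vi.LabelDetectionMode.SHOT_AND_FRAME_MODE,
            frame_confidence_threshold=0.4,
        )
    ),
)

# AnnotateVideo returns a long-running operation; block until it completes.
operation = client.annotate_video(request=request)
response = operation.result(timeout=600)
```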
+ repeated string language_hints = 1; + + // Model to use for text detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 2; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection. +message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g., `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // For example, when the label is `Terrier`, the category is likely `dog`. In + // some cases there might be more than one category; e.g., `Terrier` could + // also be a `pet`. + repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; + + // Feature version. + string version = 5; +} + +// Video frame level annotation results for explicit content. +message ExplicitContentFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Likelihood of the pornography content. + Likelihood pornography_likelihood = 2; +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +message ExplicitContentAnnotation { + // All video frames where explicit content was detected. + repeated ExplicitContentFrame frames = 1; + + // Feature version. + string version = 2; +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +message NormalizedBoundingBox { + // Left X coordinate. + float left = 1; + + // Top Y coordinate. + float top = 2; + + // Right X coordinate. + float right = 3; + + // Bottom Y coordinate. + float bottom = 4; +} + +// Face detection annotation. +message FaceDetectionAnnotation { + // The face tracks with attributes. + repeated Track tracks = 3; + + // The thumbnail of a person's face. + bytes thumbnail = 4; + + // Feature version. + string version = 5; +} + +// Person detection annotation per video.
+message PersonDetectionAnnotation { + // The detected tracks of a person. + repeated Track tracks = 1; + + // Feature version. + string version = 2; +} + +// Video segment level annotation results for face detection. +message FaceSegment { + // Video segment where a face was detected. + VideoSegment segment = 1; +} + +// Deprecated. No effect. +message FaceFrame { + option deprecated = true; + + // Normalized bounding boxes in a frame. + // There can be more than one box if the same face is detected in multiple + // locations within the current frame. + repeated NormalizedBoundingBox normalized_bounding_boxes = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + google.protobuf.Duration time_offset = 2; +} + +// Deprecated. No effect. +message FaceAnnotation { + option deprecated = true; + + // Thumbnail of a representative face view (in JPEG format). + bytes thumbnail = 1; + + // All video segments where a face was detected. + repeated FaceSegment segments = 2; + + // All video frames where a face was detected. + repeated FaceFrame frames = 3; +} + +// For tracking related features. +// An object at time_offset with attributes, and located with +// normalized_bounding_box. +message TimestampedObject { + // Normalized bounding box in the frame where the object is located. + NormalizedBoundingBox normalized_bounding_box = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this object. + google.protobuf.Duration time_offset = 2; + + // Optional. The attributes of the object in the bounding box. + repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The detected landmarks. + repeated DetectedLandmark landmarks = 4 + [(google.api.field_behavior) = OPTIONAL]; +} + +// A track of an object instance. +message Track { + // Video segment of a track. + VideoSegment segment = 1; + + // The object with timestamp and attributes per frame in the track. + repeated TimestampedObject timestamped_objects = 2; + + // Optional. Attributes at the track level. + repeated DetectedAttribute attributes = 3 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The confidence score of the tracked object. + float confidence = 4 [(google.api.field_behavior) = OPTIONAL]; +} + +// A generic detected attribute represented by name in string format. +message DetectedAttribute { + // The name of the attribute, for example, glasses, dark_glasses, mouth_open. + // A full list of supported type names will be provided in the document. + string name = 1; + + // Detected attribute confidence. Range [0, 1]. + float confidence = 2; + + // Text value of the detection result. For example, the value for "HairColor" + // can be "black", "blonde", etc. + string value = 3; +} + +// A generic detected landmark represented by name in string format and a 2D +// location. +message DetectedLandmark { + // The name of this landmark, for example, left_hand, right_shoulder. + string name = 1; + + // The 2D point of the detected landmark using the normalized image + // coordinate system. The normalized coordinates range from 0 to 1. + NormalizedVertex point = 2; + + // The confidence score of the detected landmark. Range [0, 1]. + float confidence = 3; +} + +// Annotation results for a single video. +message VideoAnnotationResults { + // Video file location in + // [Cloud Storage](https://cloud.google.com/storage/).
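The `tracks` and `thumbnail` fields introduced by this PR hang off `FaceDetectionAnnotation` and reuse the `Track`, `TimestampedObject`, and `DetectedAttribute` messages above. A hedged sketch of consuming them from the Python client; the input URI and output filename are assumptions, and the thumbnail is assumed to be JPEG, as documented on the deprecated `FaceAnnotation`:

```python
from google.cloud import videointelligence_v1 as vi

client = vi.VideoIntelligenceServiceClient()
operation = client.annotate_video(
    request=vi.AnnotateVideoRequest(
        input_uri="gs://my-bucket/my-video.mp4",  # hypothetical input
        features=[vi.Feature.FACE_DETECTION],
        video_context=vi.VideoContext(
            face_detection_config=vi.FaceDetectionConfig(
                include_bounding_boxes=True,
                include_attributes=True,
            )
        ),
    )
)
results = operation.result(timeout=600).annotation_results[0]

for i, annotation in enumerate(results.face_detection_annotations):
    # New in this PR: a thumbnail of the detected face (assumed JPEG bytes).
    if annotation.thumbnail:
        with open(f"face-{i}.jpg", "wb") as f:
            f.write(annotation.thumbnail)
    # New in this PR: face tracks with per-frame boxes and attributes.
    for track in annotation.tracks:
        for obj in track.timestamped_objects:
            box = obj.normalized_bounding_box
            print(box.left, box.top, box.right, box.bottom)
            for attribute in obj.attributes:
                print(attribute.name, attribute.confidence)
```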
+ string input_uri = 1; + + // Video segment on which the annotation is run. + VideoSegment segment = 10; + + // Topical label annotations on video level or user-specified segment level. + // There is exactly one element for each unique label. + repeated LabelAnnotation segment_label_annotations = 2; + + // Presence label annotations on video level or user-specified segment level. + // There is exactly one element for each unique label. Compared to the + // existing topical `segment_label_annotations`, this field presents more + // fine-grained, segment-level labels detected in video content and is made + // available only when the client sets `LabelDetectionConfig.model` to + // "builtin/latest" in the request. + repeated LabelAnnotation segment_presence_label_annotations = 23; + + // Topical label annotations on shot level. + // There is exactly one element for each unique label. + repeated LabelAnnotation shot_label_annotations = 3; + + // Presence label annotations on shot level. There is exactly one element for + // each unique label. Compared to the existing topical + // `shot_label_annotations`, this field presents more fine-grained, shot-level + // labels detected in video content and is made available only when the client + // sets `LabelDetectionConfig.model` to "builtin/latest" in the request. + repeated LabelAnnotation shot_presence_label_annotations = 24; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Deprecated. Please use `face_detection_annotations` instead. + repeated FaceAnnotation face_annotations = 5 [deprecated = true]; + + // Face detection annotations. + repeated FaceDetectionAnnotation face_detection_annotations = 13; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // Speech transcription. + repeated SpeechTranscription speech_transcriptions = 11; + + // OCR text detection and tracking. + // Annotations for the list of detected text snippets. Each snippet has a + // list of frame information associated with it. + repeated TextAnnotation text_annotations = 12; + + // Annotations for the list of objects detected and tracked in the video. + repeated ObjectTrackingAnnotation object_annotations = 14; + + // Annotations for the list of logos detected, tracked, and recognized in + // the video. + repeated LogoRecognitionAnnotation logo_recognition_annotations = 19; + + // Person detection annotations. + repeated PersonDetectionAnnotation person_detection_annotations = 20; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. Guaranteed to be + // 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received.
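Reading results back is uniform across features. A short sketch of walking the label annotations in `VideoAnnotationResults`, assuming `results = response.annotation_results[0]` from the earlier label-detection sketch; the timedelta conversion relies on proto-plus marshaling of `google.protobuf.Duration` fields:

```python
# Sketch: enumerate segment-level labels with their categories and segments.
for label in results.segment_label_annotations:
    categories = [c.description for c in label.category_entities]
    print(label.entity.description, categories)
    for segment in label.segments:
        # proto-plus surfaces Duration fields as datetime.timedelta.
        start = segment.segment.start_time_offset.total_seconds()
        end = segment.segment.end_time_offset.total_seconds()
        print(f"  {start:.1f}s-{end:.1f}s confidence={segment.confidence:.2f}")
```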
+ google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; + + // Specifies which feature is being tracked if the request contains more than + // one feature. + Feature feature = 5; + + // Specifies which segment is being tracked if the request contains more than + // one segment. + VideoSegment segment = 6; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Config for SPEECH_TRANSCRIPTION. +message SpeechTranscriptionConfig { + // Required. The language of the supplied audio as a + // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag. + // Example: "en-US". + // See [Language Support](https://cloud.google.com/speech/docs/languages) + // for a list of the currently supported language codes. + string language_code = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. Maximum number of recognition hypotheses to be returned. + // Specifically, the maximum number of `SpeechRecognitionAlternative` messages + // within each `SpeechTranscription`. The server may return fewer than + // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will + // return a maximum of one. If omitted, will return a maximum of one. + int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set to `true`, the server will attempt to filter out + // profanities, replacing all but the initial character in each filtered word + // with asterisks, e.g. "f***". If set to `false` or omitted, profanities + // won't be filtered out. + bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. A means to provide context to assist the speech recognition. + repeated SpeechContext speech_contexts = 4 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', adds punctuation to recognition result hypotheses. + // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. The default 'false' value + // does not add punctuation to result hypotheses. NOTE: "This is currently + // offered as an experimental service, complimentary to all users. In the + // future this may be exclusively available as a premium feature." + bool enable_automatic_punctuation = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', enables speaker detection for each recognized word in + // the top alternative of the recognition result using a speaker_tag provided + // in the WordInfo. + // Note: When this is true, we send all the words from the beginning of the + // audio for the top alternative in every consecutive response. + // This is done in order to improve our speaker tags as our models learn to + // identify the speakers in the conversation over time. + bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, specifies the estimated number of speakers in the + // conversation. If not set, defaults to '2'.
Ignored unless + // enable_speaker_diarization is set to true. + int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, the top result includes a list of words and the + // confidence for those words. If `false`, no word-level confidence + // information is returned. The default is `false`. + bool enable_word_confidence = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +message SpeechContext { + // Optional. A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// A speech recognition result corresponding to a portion of the audio. +message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. + repeated SpeechRecognitionAlternative alternatives = 1; + + // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag of the language in this result. This language code was + // detected to have the most likelihood of being spoken in the audio. + string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Transcript text representing the words that the user spoke. + string transcript = 1; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A list of word-specific information for each recognized word. + // Note: When `enable_speaker_diarization` is set to true, you will see all + // the words from the beginning of the audio. + repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +message WordInfo { + // Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration start_time = 1; + + // Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. 
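A sketch of wiring `SpeechTranscriptionConfig` into a request and reading diarized words back, under the same assumptions as the earlier sketches (hypothetical input URI; `alternatives` may be empty for some transcriptions):

```python
from google.cloud import videointelligence_v1 as vi

config = vi.SpeechTranscriptionConfig(
    language_code="en-US",
    enable_automatic_punctuation=True,
    enable_speaker_diarization=True,
    diarization_speaker_count=2,  # ignored unless diarization is enabled
    enable_word_confidence=True,
)

client = vi.VideoIntelligenceServiceClient()
operation = client.annotate_video(
    request=vi.AnnotateVideoRequest(
        input_uri="gs://my-bucket/my-video.mp4",  # hypothetical input
        features=[vi.Feature.SPEECH_TRANSCRIPTION],
        video_context=vi.VideoContext(speech_transcription_config=config),
    )
)
results = operation.result(timeout=600).annotation_results[0]

for transcription in results.speech_transcriptions:
    if not transcription.alternatives:
        continue
    alternative = transcription.alternatives[0]  # most probable hypothesis
    print(alternative.transcript, alternative.confidence)
    for word in alternative.words:
        print(word.word, word.speaker_tag)
```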
This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration end_time = 2; + + // The word corresponding to this set of information. + string word = 3; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A distinct integer value is assigned for every speaker within + // the audio. This field specifies which one of those speakers was detected to + // have spoken this word. Value ranges from 1 up to diarization_speaker_count, + // and is only set if speaker diarization is enabled. + int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// A vertex represents a 2D point in the image. +// NOTE: the normalized vertex coordinates are relative to the original image +// and range from 0 to 1. +message NormalizedVertex { + // X coordinate. + float x = 1; + + // Y coordinate. + float y = 2; +} + +// Normalized bounding polygon for text (that might not be aligned with axis). +// Contains a list of the corner points in clockwise order starting from the +// top-left corner. For example, for a rectangular bounding box: +// When the text is horizontal it might look like: +// 0----1 +// |    | +// 3----2 +// +// When it's rotated 180 degrees clockwise around the top-left corner it +// becomes: +// 2----3 +// |    | +// 1----0 +// +// and the vertex order will still be (0, 1, 2, 3). Note that values can be less +// than 0 or greater than 1 due to trigonometric calculations for the location +// of the box. +message NormalizedBoundingPoly { + // Normalized vertices of the bounding polygon. + repeated NormalizedVertex vertices = 1; +} + +// Video segment level annotation results for text detection. +message TextSegment { + // Video segment where a text snippet was detected. + VideoSegment segment = 1; + + // Confidence for the track of detected text. It is calculated as the highest + // over all frames where OCR detected text appears. + float confidence = 2; + + // Information related to the frames where OCR detected text appears. + repeated TextFrame frames = 3; +} + +// Video frame level annotation results for text annotation (OCR). +// Contains information regarding timestamp and bounding box locations for the +// frames containing detected OCR text snippets. +message TextFrame { + // Bounding polygon of the detected text for this frame. + NormalizedBoundingPoly rotated_bounding_box = 1; + + // Timestamp of this frame. + google.protobuf.Duration time_offset = 2; +} + +// Annotations related to one detected OCR text snippet. This will contain the +// corresponding text, confidence value, and frame level information for each +// detection. +message TextAnnotation { + // The detected text. + string text = 1; + + // All video segments where OCR detected text appears. + repeated TextSegment segments = 2; + + // Feature version. + string version = 3; +} + +// Video frame level annotations for object detection and tracking. This field +// stores per frame location, time offset, and confidence. +message ObjectTrackingFrame { + // The normalized bounding box location of this object track for the frame.
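A brief sketch of reading `TextAnnotation` results, assuming a TEXT_DETECTION request was made along the lines of the earlier sketches:

```python
# Sketch: print each detected snippet with its rotated bounding polygon.
for text in results.text_annotations:
    print(text.text)
    for segment in text.segments:
        frame = segment.frames[0]
        # Vertices run clockwise from the top-left corner; values may fall
        # slightly outside [0, 1], per the NormalizedBoundingPoly comment.
        for vertex in frame.rotated_bounding_box.vertices:
            print(f"({vertex.x:.3f}, {vertex.y:.3f})")
```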
+ NormalizedBoundingBox normalized_bounding_box = 1; + + // The timestamp of the frame in microseconds. + google.protobuf.Duration time_offset = 2; +} + +// Annotations corresponding to one tracked object. +message ObjectTrackingAnnotation { + // Different representation of tracking info in non-streaming batch + // and streaming modes. + oneof track_info { + // Non-streaming batch mode ONLY. + // Each object track corresponds to one video segment where it appears. + VideoSegment segment = 3; + + // Streaming mode ONLY. + // In streaming mode, we do not know the end time of a tracked object + // before it is completed. Hence, there is no VideoSegment info returned. + // Instead, we provide a unique identifiable integer track_id so that + // customers can correlate the results of the ongoing + // ObjectTrackingAnnotation of the same track_id over time. + int64 track_id = 5; + } + + // Entity to specify the object category that this track is labeled as. + Entity entity = 1; + + // Object category's labeling confidence of this track. + float confidence = 4; + + // Information corresponding to all frames where this object track appears. + // Non-streaming batch mode: there may be one or multiple ObjectTrackingFrame + // messages in frames. + // Streaming mode: there can be only one ObjectTrackingFrame message in frames. + repeated ObjectTrackingFrame frames = 2; + + // Feature version. + string version = 6; +} + +// Annotation corresponding to one detected, tracked, and recognized logo class. +message LogoRecognitionAnnotation { + // Entity category information to specify the logo class that all the logo + // tracks within this LogoRecognitionAnnotation are recognized as. + Entity entity = 1; + + // All logo tracks where the recognized logo appears. Each track corresponds + // to one logo instance appearing in consecutive frames. + repeated Track tracks = 2; + + // All video segments where the recognized logo appears. There might be + // multiple instances of the same logo class appearing in one VideoSegment. + repeated VideoSegment segments = 3; +} diff --git a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/types/video_intelligence.py b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/types/video_intelligence.py index 0004b15c4286..7586ed64c6ed 100644 --- a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/types/video_intelligence.py +++ b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1/types/video_intelligence.py @@ -551,10 +551,18 @@ class FaceDetectionAnnotation(proto.Message): r"""Face detection annotation. Attributes: + tracks (Sequence[~.video_intelligence.Track]): + The face tracks with attributes. + thumbnail (bytes): + The thumbnail of a person's face. version (str): Feature version. """ + tracks = proto.RepeatedField(proto.MESSAGE, number=3, message="Track",) + + thumbnail = proto.Field(proto.BYTES, number=4) + version = proto.Field(proto.STRING, number=5) diff --git a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto new file mode 100644 index 000000000000..690099751da9 --- /dev/null +++ b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto @@ -0,0 +1,408 @@ +// Copyright 2019 Google LLC.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1beta2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1Beta2"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1beta2;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1beta2"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1beta2"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1beta2"; + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta2/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). A video + // URI may include wildcards in `object-id`, and thus identify multiple + // videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. 
+ VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for FACE_DETECTION. +message FaceDetectionConfig { + // Model to use for face detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; + + // Whether bounding boxes are included in the face annotation output. + bool include_bounding_boxes = 2; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). + google.protobuf.Duration start_time_offset = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the end of the segment (inclusive). + google.protobuf.Duration end_time_offset = 2; +} + +// Video segment level annotation results for label detection.
+message LabelSegment { + // Video segment where a label was detected. + VideoSegment segment = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Video frame level annotation results for label detection. +message LabelFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Confidence that the label is accurate. Range: [0, 1]. + float confidence = 2; +} + +// Detected entity from video analysis. +message Entity { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search + // API](https://developers.google.com/knowledge-graph/). + string entity_id = 1; + + // Textual description, e.g. `Fixed-gear bicycle`. + string description = 2; + + // Language code for `description` in BCP-47 format. + string language_code = 3; +} + +// Label annotation. +message LabelAnnotation { + // Detected entity. + Entity entity = 1; + + // Common categories for the detected entity. + // E.g., when the label is `Terrier`, the category is likely `dog`. In some + // cases there might be more than one category; e.g., `Terrier` could also be + // a `pet`. + repeated Entity category_entities = 2; + + // All video segments where a label was detected. + repeated LabelSegment segments = 3; + + // All video frames where a label was detected. + repeated LabelFrame frames = 4; +} + +// Video frame level annotation results for explicit content. +message ExplicitContentFrame { + // Time-offset, relative to the beginning of the video, corresponding to the + // video frame for this location. + google.protobuf.Duration time_offset = 1; + + // Likelihood of the pornography content. + Likelihood pornography_likelihood = 2; +} + +// Explicit content annotation (based on per-frame visual signals only). +// If no explicit content has been detected in a frame, no annotations are +// present for that frame. +message ExplicitContentAnnotation { + // All video frames where explicit content was detected. + repeated ExplicitContentFrame frames = 1; +} + +// Normalized bounding box. +// The normalized vertex coordinates are relative to the original image. +// Range: [0, 1]. +message NormalizedBoundingBox { + // Left X coordinate. + float left = 1; + + // Top Y coordinate. + float top = 2; + + // Right X coordinate. + float right = 3; + + // Bottom Y coordinate. + float bottom = 4; +} + +// Video segment level annotation results for face detection. +message FaceSegment { + // Video segment where a face was detected. + VideoSegment segment = 1; +} + +// Video frame level annotation results for face detection. +message FaceFrame { + // Normalized bounding boxes in a frame. + // There can be more than one box if the same face is detected in multiple + // locations within the current frame. + repeated NormalizedBoundingBox normalized_bounding_boxes = 1; + + // Time-offset, relative to the beginning of the video, + // corresponding to the video frame for this location. + google.protobuf.Duration time_offset = 2; +} + +// Face annotation. +message FaceAnnotation { + // Thumbnail of a representative face view (in JPEG format). + bytes thumbnail = 1; + + // All video segments where a face was detected. + repeated FaceSegment segments = 2; + + // All video frames where a face was detected. + repeated FaceFrame frames = 3; +} + +// Annotation results for a single video.
+message VideoAnnotationResults { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Label annotations on video level or user specified segment level. + // There is exactly one element for each unique label. + repeated LabelAnnotation segment_label_annotations = 2; + + // Label annotations on shot level. + // There is exactly one element for each unique label. + repeated LabelAnnotation shot_label_annotations = 3; + + // Label annotations on frame level. + // There is exactly one element for each unique label. + repeated LabelAnnotation frame_label_annotations = 4; + + // Face annotations. There is exactly one element for each unique face. + repeated FaceAnnotation face_annotations = 5; + + // Shot annotations. Each shot is represented as a video segment. + repeated VideoSegment shot_annotations = 6; + + // Explicit content annotation. + ExplicitContentAnnotation explicit_annotation = 7; + + // If set, indicates an error. Note that for a single `AnnotateVideoRequest` + // some videos may succeed and some may fail. + google.rpc.Status error = 9; +} + +// Video annotation response. Included in the `response` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoResponse { + // Annotation results for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationResults annotation_results = 1; +} + +// Annotation progress for a single video. +message VideoAnnotationProgress { + // Video file location in + // [Google Cloud Storage](https://cloud.google.com/storage/). + string input_uri = 1; + + // Approximate percentage processed thus far. + // Guaranteed to be 100 when fully processed. + int32 progress_percent = 2; + + // Time when the request was received. + google.protobuf.Timestamp start_time = 3; + + // Time of the most recent update. + google.protobuf.Timestamp update_time = 4; +} + +// Video annotation progress. Included in the `metadata` +// field of the `Operation` returned by the `GetOperation` +// call of the `google::longrunning::Operations` service. +message AnnotateVideoProgress { + // Progress metadata for all videos specified in `AnnotateVideoRequest`. + repeated VideoAnnotationProgress annotation_progress = 1; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Human face detection and tracking. + FACE_DETECTION = 4; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. 
+ VERY_LIKELY = 5; +} diff --git a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto new file mode 100644 index 000000000000..a54bddd07d0e --- /dev/null +++ b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto @@ -0,0 +1,444 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1p1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P1Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p1beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p1beta1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1p1beta1"; + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p1beta1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. 
If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Video segment. +message VideoSegment { + // Time-offset, relative to the beginning of the video, + // corresponding to the start of the segment (inclusive). 
+  google.protobuf.Duration start_time_offset = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the end of the segment (inclusive).
+  google.protobuf.Duration end_time_offset = 2;
+}
+
+// Video segment level annotation results for label detection.
+message LabelSegment {
+  // Video segment where a label was detected.
+  VideoSegment segment = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Video frame level annotation results for label detection.
+message LabelFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Detected entity from video analysis.
+message Entity {
+  // Opaque entity ID. Some IDs may be available in
+  // [Google Knowledge Graph Search
+  // API](https://developers.google.com/knowledge-graph/).
+  string entity_id = 1;
+
+  // Textual description, e.g. `Fixed-gear bicycle`.
+  string description = 2;
+
+  // Language code for `description` in BCP-47 format.
+  string language_code = 3;
+}
+
+// Label annotation.
+message LabelAnnotation {
+  // Detected entity.
+  Entity entity = 1;
+
+  // Common categories for the detected entity.
+  // E.g. when the label is `Terrier`, the category is likely `dog`. In some
+  // cases there might be more than one category, e.g. `Terrier` could also be
+  // a `pet`.
+  repeated Entity category_entities = 2;
+
+  // All video segments where a label was detected.
+  repeated LabelSegment segments = 3;
+
+  // All video frames where a label was detected.
+  repeated LabelFrame frames = 4;
+}
+
+// Video frame level annotation results for explicit content.
+message ExplicitContentFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Likelihood of the pornography content.
+  Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+  // All video frames where explicit content was detected.
+  repeated ExplicitContentFrame frames = 1;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+  // Output only. Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Label annotations on video level or user-specified segment level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation segment_label_annotations = 2;
+
+  // Label annotations on shot level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation shot_label_annotations = 3;
+
+  // Label annotations on frame level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation frame_label_annotations = 4;
+
+  // Shot annotations. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 6;
+
+  // Explicit content annotation.
+  ExplicitContentAnnotation explicit_annotation = 7;
+
+  // Speech transcription.
+  repeated SpeechTranscription speech_transcriptions = 11;
+
+  // Output only. If set, indicates an error. Note that for a single
+  // `AnnotateVideoRequest` some videos may succeed and some may fail.
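+  //
+  // For illustration only (an editor's sketch, not generated documentation):
+  // a wildcard request may produce a mix of per-video successes and failures,
+  // e.g. (hypothetical URIs; `3` is `google.rpc.Code.INVALID_ARGUMENT`):
+  //
+  //   annotation_results { input_uri: "/bucket/ok.mp4" /* ... */ }
+  //   annotation_results { input_uri: "/bucket/bad.avi" error { code: 3 } }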
+  google.rpc.Status error = 9;
+}
+
+// Video annotation response. Included in the `response`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoResponse {
+  // Annotation results for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationResults annotation_results = 1;
+}
+
+// Annotation progress for a single video.
+message VideoAnnotationProgress {
+  // Output only. Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Output only. Approximate percentage processed thus far. Guaranteed to be
+  // 100 when fully processed.
+  int32 progress_percent = 2;
+
+  // Output only. Time when the request was received.
+  google.protobuf.Timestamp start_time = 3;
+
+  // Output only. Time of the most recent update.
+  google.protobuf.Timestamp update_time = 4;
+}
+
+// Video annotation progress. Included in the `metadata`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoProgress {
+  // Progress metadata for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationProgress annotation_progress = 1;
+}
+
+// Config for SPEECH_TRANSCRIPTION.
+message SpeechTranscriptionConfig {
+  // Required. The language of the supplied audio as a
+  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+  // Example: "en-US".
+  // See [Language Support](https://cloud.google.com/speech/docs/languages)
+  // for a list of the currently supported language codes.
+  string language_code = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Maximum number of recognition hypotheses to be returned.
+  // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+  // within each `SpeechTranscription`. The server may return fewer than
+  // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
+  // return a maximum of one. If omitted, will return a maximum of one.
+  int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to `true`, the server will attempt to filter out
+  // profanities, replacing all but the initial character in each filtered word
+  // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+  // won't be filtered out.
+  bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A means to provide context to assist the speech recognition.
+  repeated SpeechContext speech_contexts = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If 'true', adds punctuation to recognition result hypotheses.
+  // This feature is only available in select languages. Setting this for
+  // requests in other languages has no effect at all. The default 'false' value
+  // does not add punctuation to result hypotheses. NOTE: "This is currently
+  // offered as an experimental service, complimentary to all users. In the
+  // future this may be exclusively available as a premium feature."
+  bool enable_automatic_punctuation = 5 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. For file formats, such as MXF or MKV, supporting multiple audio
+  // tracks, specify up to two tracks. Default: track 0.
+  repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// Provides "hints" to the speech recognizer to favor specific words and phrases
+// in the results.
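+//
+// For illustration, a sketch of how a speech context might be populated
+// inside a `SpeechTranscriptionConfig` (the phrase values are hypothetical):
+//
+//   speech_transcription_config {
+//     language_code: "en-US"
+//     speech_contexts { phrases: "fixie" phrases: "derailleur" }
+//   }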
+message SpeechContext { + // Optional. A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// A speech recognition result corresponding to a portion of the audio. +message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. + repeated SpeechRecognitionAlternative alternatives = 1; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Output only. Transcript text representing the words that the user spoke. + string transcript = 1; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. + // This field is not guaranteed to be accurate and users should not rely on it + // to be always provided. + // The default of 0.0 is a sentinel value indicating `confidence` was not set. + float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. A list of word-specific information for each recognized word. + repeated WordInfo words = 3; +} + +// Word-specific information for recognized words. Word information is only +// included in the response when certain request parameters are set, such +// as `enable_word_time_offsets`. +message WordInfo { + // Output only. Time offset relative to the beginning of the audio, and + // corresponding to the start of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration start_time = 1; + + // Output only. Time offset relative to the beginning of the audio, and + // corresponding to the end of the spoken word. This field is only set if + // `enable_word_time_offsets=true` and only in the top hypothesis. This is an + // experimental feature and the accuracy of the time offset can vary. + google.protobuf.Duration end_time = 2; + + // Output only. The word corresponding to this set of information. + string word = 3; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // Speech transcription. + SPEECH_TRANSCRIPTION = 6; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. 
+ LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} diff --git a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto new file mode 100644 index 000000000000..3b1d51cd7754 --- /dev/null +++ b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto @@ -0,0 +1,476 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.videointelligence.v1p2beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P2Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p2beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p2beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p2beta1"; +option ruby_package = "Google::Cloud::VideoIntelligence::V1p2beta1"; + +// Service that implements Google Cloud Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p2beta1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Google Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). 
+ // A video URI may include wildcards in `object-id`, and thus identify + // multiple videos. Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` should be unset. + string input_uri = 1; + + // The video data bytes. + // If unset, the input video(s) should be specified via `input_uri`. + // If set, `input_uri` should be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported, which must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see + // [Request URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region + // is specified, a region will be determined based on video file location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for TEXT_DETECTION. + TextDetectionConfig text_detection_config = 8; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e. non-moving) camera. + // When set to true, might improve detection accuracy for moving objects. + // Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 3; +} + +// Config for SHOT_CHANGE_DETECTION. +message ShotChangeDetectionConfig { + // Model to use for shot change detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for EXPLICIT_CONTENT_DETECTION. +message ExplicitContentDetectionConfig { + // Model to use for explicit content detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". + string model = 1; +} + +// Config for TEXT_DETECTION. 
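+// For illustration, a text detection config that hints at two languages might
+// look like this in textproto form (the hint values here are hypothetical,
+// not defaults):
+//
+//   text_detection_config {
+//     language_hints: "en"
+//     language_hints: "fr"
+//   }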
+message TextDetectionConfig {
+  // Language hint can be specified if the language to be detected is known a
+  // priori. It can increase the accuracy of the detection. Language hint must
+  // be language code in BCP-47 format.
+  //
+  // Automatic language detection is performed if no hint is provided.
+  repeated string language_hints = 1;
+}
+
+// Video segment.
+message VideoSegment {
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the start of the segment (inclusive).
+  google.protobuf.Duration start_time_offset = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the end of the segment (inclusive).
+  google.protobuf.Duration end_time_offset = 2;
+}
+
+// Video segment level annotation results for label detection.
+message LabelSegment {
+  // Video segment where a label was detected.
+  VideoSegment segment = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Video frame level annotation results for label detection.
+message LabelFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Detected entity from video analysis.
+message Entity {
+  // Opaque entity ID. Some IDs may be available in
+  // [Google Knowledge Graph Search
+  // API](https://developers.google.com/knowledge-graph/).
+  string entity_id = 1;
+
+  // Textual description, e.g. `Fixed-gear bicycle`.
+  string description = 2;
+
+  // Language code for `description` in BCP-47 format.
+  string language_code = 3;
+}
+
+// Label annotation.
+message LabelAnnotation {
+  // Detected entity.
+  Entity entity = 1;
+
+  // Common categories for the detected entity.
+  // E.g. when the label is `Terrier`, the category is likely `dog`. In some
+  // cases there might be more than one category, e.g. `Terrier` could also be
+  // a `pet`.
+  repeated Entity category_entities = 2;
+
+  // All video segments where a label was detected.
+  repeated LabelSegment segments = 3;
+
+  // All video frames where a label was detected.
+  repeated LabelFrame frames = 4;
+}
+
+// Video frame level annotation results for explicit content.
+message ExplicitContentFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Likelihood of the pornography content.
+  Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+  // All video frames where explicit content was detected.
+  repeated ExplicitContentFrame frames = 1;
+}
+
+// Normalized bounding box.
+// The normalized vertex coordinates are relative to the original image.
+// Range: [0, 1].
+message NormalizedBoundingBox {
+  // Left X coordinate.
+  float left = 1;
+
+  // Top Y coordinate.
+  float top = 2;
+
+  // Right X coordinate.
+  float right = 3;
+
+  // Bottom Y coordinate.
+  float bottom = 4;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+  // Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Label annotations on video level or user-specified segment level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation segment_label_annotations = 2;
+
+  // Label annotations on shot level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation shot_label_annotations = 3;
+
+  // Label annotations on frame level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation frame_label_annotations = 4;
+
+  // Shot annotations. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 6;
+
+  // Explicit content annotation.
+  ExplicitContentAnnotation explicit_annotation = 7;
+
+  // OCR text detection and tracking.
+  // Annotations for list of detected text snippets. Each will have a list of
+  // frame information associated with it.
+  repeated TextAnnotation text_annotations = 12;
+
+  // Annotations for list of objects detected and tracked in video.
+  repeated ObjectTrackingAnnotation object_annotations = 14;
+
+  // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+  // some videos may succeed and some may fail.
+  google.rpc.Status error = 9;
+}
+
+// Video annotation response. Included in the `response`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoResponse {
+  // Annotation results for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationResults annotation_results = 1;
+}
+
+// Annotation progress for a single video.
+message VideoAnnotationProgress {
+  // Video file location in
+  // [Google Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Approximate percentage processed thus far. Guaranteed to be
+  // 100 when fully processed.
+  int32 progress_percent = 2;
+
+  // Time when the request was received.
+  google.protobuf.Timestamp start_time = 3;
+
+  // Time of the most recent update.
+  google.protobuf.Timestamp update_time = 4;
+}
+
+// Video annotation progress. Included in the `metadata`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoProgress {
+  // Progress metadata for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationProgress annotation_progress = 1;
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+  // X coordinate.
+  float x = 1;
+
+  // Y coordinate.
+  float y = 2;
+}
+
+// Normalized bounding polygon for text (that might not be aligned with axis).
+// Contains list of the corner points in clockwise order starting from
+// top-left corner. For example, for a rectangular bounding box:
+// When the text is horizontal it might look like:
+//         0----1
+//         |    |
+//         3----2
+//
+// When it's clockwise rotated 180 degrees around the top-left corner it
+// becomes:
+//         2----3
+//         |    |
+//         1----0
+//
+// and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+// than 0, or greater than 1 due to trigonometric calculations for location of
+// the box.
+message NormalizedBoundingPoly {
+  // Normalized vertices of the bounding polygon.
+  repeated NormalizedVertex vertices = 1;
+}
+
+// Video segment level annotation results for text detection.
+message TextSegment {
+  // Video segment where a text snippet was detected.
+  VideoSegment segment = 1;
+
+  // Confidence for the track of detected text.
It is calculated as the highest + // over all frames where OCR detected text appears. + float confidence = 2; + + // Information related to the frames where OCR detected text appears. + repeated TextFrame frames = 3; +} + +// Video frame level annotation results for text annotation (OCR). +// Contains information regarding timestamp and bounding box locations for the +// frames containing detected OCR text snippets. +message TextFrame { + // Bounding polygon of the detected text for this frame. + NormalizedBoundingPoly rotated_bounding_box = 1; + + // Timestamp of this frame. + google.protobuf.Duration time_offset = 2; +} + +// Annotations related to one detected OCR text snippet. This will contain the +// corresponding text, confidence value, and frame level information for each +// detection. +message TextAnnotation { + // The detected text. + string text = 1; + + // All video segments where OCR detected text appears. + repeated TextSegment segments = 2; +} + +// Video frame level annotations for object detection and tracking. This field +// stores per frame location, time offset, and confidence. +message ObjectTrackingFrame { + // The normalized bounding box location of this object track for the frame. + NormalizedBoundingBox normalized_bounding_box = 1; + + // The timestamp of the frame in microseconds. + google.protobuf.Duration time_offset = 2; +} + +// Annotations corresponding to one tracked object. +message ObjectTrackingAnnotation { + // Entity to specify the object category that this track is labeled as. + Entity entity = 1; + + // Object category's labeling confidence of this track. + float confidence = 4; + + // Information corresponding to all frames where this object track appears. + repeated ObjectTrackingFrame frames = 2; + + // Each object track corresponds to one video segment where it appears. + VideoSegment segment = 3; +} + +// Video annotation feature. +enum Feature { + // Unspecified. + FEATURE_UNSPECIFIED = 0; + + // Label detection. Detect objects, such as dog or flower. + LABEL_DETECTION = 1; + + // Shot change detection. + SHOT_CHANGE_DETECTION = 2; + + // Explicit content detection. + EXPLICIT_CONTENT_DETECTION = 3; + + // OCR text detection and tracking. + TEXT_DETECTION = 7; + + // Object detection and tracking. + OBJECT_TRACKING = 9; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} diff --git a/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto new file mode 100644 index 000000000000..6284e0db3847 --- /dev/null +++ b/packages/google-cloud-videointelligence/google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto @@ -0,0 +1,1089 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.videointelligence.v1p3beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; + +option csharp_namespace = "Google.Cloud.VideoIntelligence.V1P3Beta1"; +option go_package = "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1;videointelligence"; +option java_multiple_files = true; +option java_outer_classname = "VideoIntelligenceServiceProto"; +option java_package = "com.google.cloud.videointelligence.v1p3beta1"; +option php_namespace = "Google\\Cloud\\VideoIntelligence\\V1p3beta1"; + +// Service that implements the Video Intelligence API. +service VideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs asynchronous video annotation. Progress and results can be + // retrieved through the `google.longrunning.Operations` interface. + // `Operation.metadata` contains `AnnotateVideoProgress` (progress). + // `Operation.response` contains `AnnotateVideoResponse` (results). + rpc AnnotateVideo(AnnotateVideoRequest) + returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1p3beta1/videos:annotate" + body: "*" + }; + option (google.api.method_signature) = "input_uri,features"; + option (google.longrunning.operation_info) = { + response_type: "AnnotateVideoResponse" + metadata_type: "AnnotateVideoProgress" + }; + } +} + +// Service that implements streaming Video Intelligence API. +service StreamingVideoIntelligenceService { + option (google.api.default_host) = "videointelligence.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform"; + + // Performs video annotation with bidirectional streaming: emitting results + // while sending video/audio bytes. + // This method is only available via the gRPC API (not REST). + rpc StreamingAnnotateVideo(stream StreamingAnnotateVideoRequest) + returns (stream StreamingAnnotateVideoResponse) {} +} + +// Video annotation request. +message AnnotateVideoRequest { + // Input video location. Currently, only + // [Cloud Storage](https://cloud.google.com/storage/) URIs are + // supported. URIs must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify + // multiple videos, a video URI may include wildcards in the `object-id`. + // Supported wildcards: '*' to match 0 or more characters; + // '?' to match 1 character. If unset, the input video should be embedded + // in the request as `input_content`. If set, `input_content` must be unset. + string input_uri = 1; + + // The video data bytes. 
+ // If unset, the input video(s) should be specified via the `input_uri`. + // If set, `input_uri` must be unset. + bytes input_content = 6; + + // Required. Requested video annotation features. + repeated Feature features = 2 [(google.api.field_behavior) = REQUIRED]; + + // Additional video context and/or feature-specific parameters. + VideoContext video_context = 3; + + // Optional. Location where the output (in JSON format) should be stored. + // Currently, only [Cloud Storage](https://cloud.google.com/storage/) + // URIs are supported. These must be specified in the following format: + // `gs://bucket-id/object-id` (other URI formats return + // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For + // more information, see [Request + // URIs](https://cloud.google.com/storage/docs/request-endpoints). + string output_uri = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Cloud region where annotation should take place. Supported cloud + // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no + // region is specified, the region will be determined based on video file + // location. + string location_id = 5 [(google.api.field_behavior) = OPTIONAL]; +} + +// Video context and/or feature-specific parameters. +message VideoContext { + // Video segments to annotate. The segments may overlap and are not required + // to be contiguous or span the whole video. If unspecified, each video is + // treated as a single segment. + repeated VideoSegment segments = 1; + + // Config for LABEL_DETECTION. + LabelDetectionConfig label_detection_config = 2; + + // Config for SHOT_CHANGE_DETECTION. + ShotChangeDetectionConfig shot_change_detection_config = 3; + + // Config for EXPLICIT_CONTENT_DETECTION. + ExplicitContentDetectionConfig explicit_content_detection_config = 4; + + // Config for FACE_DETECTION. + FaceDetectionConfig face_detection_config = 5; + + // Config for SPEECH_TRANSCRIPTION. + SpeechTranscriptionConfig speech_transcription_config = 6; + + // Config for TEXT_DETECTION. + TextDetectionConfig text_detection_config = 8; + + // Config for PERSON_DETECTION. + PersonDetectionConfig person_detection_config = 11; + + // Config for OBJECT_TRACKING. + ObjectTrackingConfig object_tracking_config = 13; +} + +// Label detection mode. +enum LabelDetectionMode { + // Unspecified. + LABEL_DETECTION_MODE_UNSPECIFIED = 0; + + // Detect shot-level labels. + SHOT_MODE = 1; + + // Detect frame-level labels. + FRAME_MODE = 2; + + // Detect both shot-level and frame-level labels. + SHOT_AND_FRAME_MODE = 3; +} + +// Bucketized representation of likelihood. +enum Likelihood { + // Unspecified likelihood. + LIKELIHOOD_UNSPECIFIED = 0; + + // Very unlikely. + VERY_UNLIKELY = 1; + + // Unlikely. + UNLIKELY = 2; + + // Possible. + POSSIBLE = 3; + + // Likely. + LIKELY = 4; + + // Very likely. + VERY_LIKELY = 5; +} + +// Config for LABEL_DETECTION. +message LabelDetectionConfig { + // What labels should be detected with LABEL_DETECTION, in addition to + // video-level labels or segment-level labels. + // If unspecified, defaults to `SHOT_MODE`. + LabelDetectionMode label_detection_mode = 1; + + // Whether the video has been shot from a stationary (i.e., non-moving) + // camera. When set to true, might improve detection accuracy for moving + // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. + bool stationary_camera = 2; + + // Model to use for label detection. + // Supported values: "builtin/stable" (the default if unset) and + // "builtin/latest". 
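+  //
+  // For illustration, a label detection config pinning the newer model for a
+  // fixed-camera video might be written as (a sketch, not a recommendation):
+  //
+  //   label_detection_config {
+  //     label_detection_mode: SHOT_AND_FRAME_MODE
+  //     stationary_camera: true
+  //     model: "builtin/latest"
+  //   }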
+  string model = 3;
+
+  // The confidence threshold used to filter labels from frame-level
+  // detection. If not set, it is set to 0.4 by default. The valid range for
+  // this threshold is [0.1, 0.9]. Any value set outside of this range will be
+  // clipped.
+  // Note: For best results, follow the default threshold. We will update the
+  // default threshold every time we release a new model.
+  float frame_confidence_threshold = 4;
+
+  // The confidence threshold used to filter labels from video-level and
+  // shot-level detections. If not set, it's set to 0.3 by default. The valid
+  // range for this threshold is [0.1, 0.9]. Any value set outside of this
+  // range will be clipped.
+  // Note: For best results, follow the default threshold. We will update the
+  // default threshold every time we release a new model.
+  float video_confidence_threshold = 5;
+}
+
+// Streaming video annotation feature.
+enum StreamingFeature {
+  // Unspecified.
+  STREAMING_FEATURE_UNSPECIFIED = 0;
+
+  // Label detection. Detect objects, such as dog or flower.
+  STREAMING_LABEL_DETECTION = 1;
+
+  // Shot change detection.
+  STREAMING_SHOT_CHANGE_DETECTION = 2;
+
+  // Explicit content detection.
+  STREAMING_EXPLICIT_CONTENT_DETECTION = 3;
+
+  // Object detection and tracking.
+  STREAMING_OBJECT_TRACKING = 4;
+
+  // Action recognition based on AutoML model.
+  STREAMING_AUTOML_ACTION_RECOGNITION = 23;
+
+  // Video classification based on AutoML model.
+  STREAMING_AUTOML_CLASSIFICATION = 21;
+
+  // Object detection and tracking based on AutoML model.
+  STREAMING_AUTOML_OBJECT_TRACKING = 22;
+}
+
+// Video annotation feature.
+enum Feature {
+  // Unspecified.
+  FEATURE_UNSPECIFIED = 0;
+
+  // Label detection. Detect objects, such as dog or flower.
+  LABEL_DETECTION = 1;
+
+  // Shot change detection.
+  SHOT_CHANGE_DETECTION = 2;
+
+  // Explicit content detection.
+  EXPLICIT_CONTENT_DETECTION = 3;
+
+  // Human face detection.
+  FACE_DETECTION = 4;
+
+  // Speech transcription.
+  SPEECH_TRANSCRIPTION = 6;
+
+  // OCR text detection and tracking.
+  TEXT_DETECTION = 7;
+
+  // Object detection and tracking.
+  OBJECT_TRACKING = 9;
+
+  // Logo detection, tracking, and recognition.
+  LOGO_RECOGNITION = 12;
+
+  // Celebrity recognition.
+  CELEBRITY_RECOGNITION = 13;
+
+  // Person detection.
+  PERSON_DETECTION = 14;
+}
+
+// Config for SHOT_CHANGE_DETECTION.
+message ShotChangeDetectionConfig {
+  // Model to use for shot change detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 1;
+}
+
+// Config for OBJECT_TRACKING.
+message ObjectTrackingConfig {
+  // Model to use for object tracking.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 1;
+}
+
+// Config for EXPLICIT_CONTENT_DETECTION.
+message ExplicitContentDetectionConfig {
+  // Model to use for explicit content detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 1;
+}
+
+// Config for FACE_DETECTION.
+message FaceDetectionConfig {
+  // Model to use for face detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 1;
+
+  // Whether bounding boxes are included in the face annotation output.
+  bool include_bounding_boxes = 2;
+
+  // Whether to enable face attributes detection, such as glasses,
+  // dark_glasses, mouth_open, etc. Ignored if 'include_bounding_boxes' is set
+  // to false.
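+  //
+  // For illustration, a face detection config requesting both boxes and
+  // attributes might look like this in textproto form (a sketch, not a
+  // default):
+  //
+  //   face_detection_config {
+  //     model: "builtin/stable"
+  //     include_bounding_boxes: true
+  //     include_attributes: true
+  //   }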
+  bool include_attributes = 5;
+}
+
+// Config for PERSON_DETECTION.
+message PersonDetectionConfig {
+  // Whether bounding boxes are included in the person detection annotation
+  // output.
+  bool include_bounding_boxes = 1;
+
+  // Whether to enable pose landmarks detection. Ignored if
+  // 'include_bounding_boxes' is set to false.
+  bool include_pose_landmarks = 2;
+
+  // Whether to enable person attributes detection, such as cloth color (black,
+  // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
+  // etc.
+  // Ignored if 'include_bounding_boxes' is set to false.
+  bool include_attributes = 3;
+}
+
+// Config for TEXT_DETECTION.
+message TextDetectionConfig {
+  // Language hint can be specified if the language to be detected is known a
+  // priori. It can increase the accuracy of the detection. Language hint must
+  // be language code in BCP-47 format.
+  //
+  // Automatic language detection is performed if no hint is provided.
+  repeated string language_hints = 1;
+
+  // Model to use for text detection.
+  // Supported values: "builtin/stable" (the default if unset) and
+  // "builtin/latest".
+  string model = 2;
+}
+
+// Video segment.
+message VideoSegment {
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the start of the segment (inclusive).
+  google.protobuf.Duration start_time_offset = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the end of the segment (inclusive).
+  google.protobuf.Duration end_time_offset = 2;
+}
+
+// Video segment level annotation results for label detection.
+message LabelSegment {
+  // Video segment where a label was detected.
+  VideoSegment segment = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Video frame level annotation results for label detection.
+message LabelFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Confidence that the label is accurate. Range: [0, 1].
+  float confidence = 2;
+}
+
+// Detected entity from video analysis.
+message Entity {
+  // Opaque entity ID. Some IDs may be available in
+  // [Google Knowledge Graph Search
+  // API](https://developers.google.com/knowledge-graph/).
+  string entity_id = 1;
+
+  // Textual description, e.g., `Fixed-gear bicycle`.
+  string description = 2;
+
+  // Language code for `description` in BCP-47 format.
+  string language_code = 3;
+}
+
+// Label annotation.
+message LabelAnnotation {
+  // Detected entity.
+  Entity entity = 1;
+
+  // Common categories for the detected entity.
+  // For example, when the label is `Terrier`, the category is likely `dog`. In
+  // some cases there might be more than one category, e.g., `Terrier` could
+  // also be a `pet`.
+  repeated Entity category_entities = 2;
+
+  // All video segments where a label was detected.
+  repeated LabelSegment segments = 3;
+
+  // All video frames where a label was detected.
+  repeated LabelFrame frames = 4;
+}
+
+// Video frame level annotation results for explicit content.
+message ExplicitContentFrame {
+  // Time-offset, relative to the beginning of the video, corresponding to the
+  // video frame for this location.
+  google.protobuf.Duration time_offset = 1;
+
+  // Likelihood of the pornography content.
+  Likelihood pornography_likelihood = 2;
+}
+
+// Explicit content annotation (based on per-frame visual signals only).
+// If no explicit content has been detected in a frame, no annotations are
+// present for that frame.
+message ExplicitContentAnnotation {
+  // All video frames where explicit content was detected.
+  repeated ExplicitContentFrame frames = 1;
+}
+
+// Normalized bounding box.
+// The normalized vertex coordinates are relative to the original image.
+// Range: [0, 1].
+message NormalizedBoundingBox {
+  // Left X coordinate.
+  float left = 1;
+
+  // Top Y coordinate.
+  float top = 2;
+
+  // Right X coordinate.
+  float right = 3;
+
+  // Bottom Y coordinate.
+  float bottom = 4;
+}
+
+// For tracking related features.
+// An object at time_offset with attributes, and located with
+// normalized_bounding_box.
+message TimestampedObject {
+  // Normalized bounding box in a frame, where the object is located.
+  NormalizedBoundingBox normalized_bounding_box = 1;
+
+  // Time-offset, relative to the beginning of the video,
+  // corresponding to the video frame for this object.
+  google.protobuf.Duration time_offset = 2;
+
+  // Optional. The attributes of the object in the bounding box.
+  repeated DetectedAttribute attributes = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The detected landmarks.
+  repeated DetectedLandmark landmarks = 4
+      [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A track of an object instance.
+message Track {
+  // Video segment of a track.
+  VideoSegment segment = 1;
+
+  // The object with timestamp and attributes per frame in the track.
+  repeated TimestampedObject timestamped_objects = 2;
+
+  // Optional. Attributes in the track level.
+  repeated DetectedAttribute attributes = 3
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The confidence score of the tracked object.
+  float confidence = 4 [(google.api.field_behavior) = OPTIONAL];
+}
+
+// A generic detected attribute represented by name in string format.
+message DetectedAttribute {
+  // The name of the attribute, for example, glasses, dark_glasses, mouth_open.
+  // A full list of supported type names will be provided in the document.
+  string name = 1;
+
+  // Detected attribute confidence. Range [0, 1].
+  float confidence = 2;
+
+  // Text value of the detection result. For example, the value for "HairColor"
+  // can be "black", "blonde", etc.
+  string value = 3;
+}
+
+// Celebrity definition.
+message Celebrity {
+  // The resource name of the celebrity. It has the format
+  // `video-intelligence/kg-mid`, which indicates a celebrity from the
+  // preloaded gallery. kg-mid is the ID in the Google Knowledge Graph, which
+  // is unique for the celebrity.
+  string name = 1;
+
+  // The celebrity name.
+  string display_name = 2;
+
+  // Textual description of additional information about the celebrity, if
+  // applicable.
+  string description = 3;
+}
+
+// The annotation result of a celebrity face track. The `celebrities` field
+// could be empty if the face track does not have any matched celebrities.
+message CelebrityTrack {
+  // The recognized celebrity with confidence score.
+  message RecognizedCelebrity {
+    // The recognized celebrity.
+    Celebrity celebrity = 1;
+
+    // Recognition confidence. Range [0, 1].
+    float confidence = 2;
+  }
+
+  // Top N matches of the celebrities for the face in this track.
+  repeated RecognizedCelebrity celebrities = 1;
+
+  // A track of a person's face.
+  Track face_track = 3;
+}
+
+// Celebrity recognition annotation per video.
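+// For illustration, one recognized track in the result might be shaped
+// roughly like this (display name and score are hypothetical; comments elide
+// detail):
+//
+//   celebrity_tracks {
+//     celebrities {
+//       celebrity { name: "video-intelligence/kg-mid" display_name: "..." }
+//       confidence: 0.9
+//     }
+//     face_track { segment { /* ... */ } }
+//   }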
+message CelebrityRecognitionAnnotation {
+  // The tracks detected from the input video, including recognized celebrities
+  // and other detected faces in the video.
+  repeated CelebrityTrack celebrity_tracks = 1;
+}
+
+// A generic detected landmark represented by name in string format and a 2D
+// location.
+message DetectedLandmark {
+  // The name of this landmark, for example, left_hand, right_shoulder.
+  string name = 1;
+
+  // The 2D point of the detected landmark using the normalized image
+  // coordinate system. The normalized coordinates have the range from 0 to 1.
+  NormalizedVertex point = 2;
+
+  // The confidence score of the detected landmark. Range [0, 1].
+  float confidence = 3;
+}
+
+// Face detection annotation.
+message FaceDetectionAnnotation {
+  // The face tracks with attributes.
+  repeated Track tracks = 3;
+
+  // The thumbnail of a person's face.
+  bytes thumbnail = 4;
+}
+
+// Person detection annotation per video.
+message PersonDetectionAnnotation {
+  // The detected tracks of a person.
+  repeated Track tracks = 1;
+}
+
+// Annotation results for a single video.
+message VideoAnnotationResults {
+  // Video file location in
+  // [Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Video segment on which the annotation is run.
+  VideoSegment segment = 10;
+
+  // Topical label annotations on video level or user-specified segment level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation segment_label_annotations = 2;
+
+  // Presence label annotations on video level or user-specified segment level.
+  // There is exactly one element for each unique label. Compared to the
+  // existing topical `segment_label_annotations`, this field presents more
+  // fine-grained, segment-level labels detected in video content and is made
+  // available only when the client sets `LabelDetectionConfig.model` to
+  // "builtin/latest" in the request.
+  repeated LabelAnnotation segment_presence_label_annotations = 23;
+
+  // Topical label annotations on shot level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation shot_label_annotations = 3;
+
+  // Presence label annotations on shot level. There is exactly one element for
+  // each unique label. Compared to the existing topical
+  // `shot_label_annotations`, this field presents more fine-grained, shot-level
+  // labels detected in video content and is made available only when the client
+  // sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
+  repeated LabelAnnotation shot_presence_label_annotations = 24;
+
+  // Label annotations on frame level.
+  // There is exactly one element for each unique label.
+  repeated LabelAnnotation frame_label_annotations = 4;
+
+  // Face detection annotations.
+  repeated FaceDetectionAnnotation face_detection_annotations = 13;
+
+  // Shot annotations. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 6;
+
+  // Explicit content annotation.
+  ExplicitContentAnnotation explicit_annotation = 7;
+
+  // Speech transcription.
+  repeated SpeechTranscription speech_transcriptions = 11;
+
+  // OCR text detection and tracking.
+  // Annotations for list of detected text snippets. Each will have a list of
+  // frame information associated with it.
+  repeated TextAnnotation text_annotations = 12;
+
+  // Annotations for list of objects detected and tracked in video.
+  repeated ObjectTrackingAnnotation object_annotations = 14;
+
+  // Annotations for list of logos detected, tracked and recognized in video.
+  repeated LogoRecognitionAnnotation logo_recognition_annotations = 19;
+
+  // Person detection annotations.
+  repeated PersonDetectionAnnotation person_detection_annotations = 20;
+
+  // Celebrity recognition annotations.
+  CelebrityRecognitionAnnotation celebrity_recognition_annotations = 21;
+
+  // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+  // some videos may succeed and some may fail.
+  google.rpc.Status error = 9;
+}
+
+// Video annotation response. Included in the `response`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoResponse {
+  // Annotation results for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationResults annotation_results = 1;
+}
+
+// Annotation progress for a single video.
+message VideoAnnotationProgress {
+  // Video file location in
+  // [Cloud Storage](https://cloud.google.com/storage/).
+  string input_uri = 1;
+
+  // Approximate percentage processed thus far. Guaranteed to be
+  // 100 when fully processed.
+  int32 progress_percent = 2;
+
+  // Time when the request was received.
+  google.protobuf.Timestamp start_time = 3;
+
+  // Time of the most recent update.
+  google.protobuf.Timestamp update_time = 4;
+
+  // Specifies which feature is being tracked if the request contains more than
+  // one feature.
+  Feature feature = 5;
+
+  // Specifies which segment is being tracked if the request contains more than
+  // one segment.
+  VideoSegment segment = 6;
+}
+
+// Video annotation progress. Included in the `metadata`
+// field of the `Operation` returned by the `GetOperation`
+// call of the `google::longrunning::Operations` service.
+message AnnotateVideoProgress {
+  // Progress metadata for all videos specified in `AnnotateVideoRequest`.
+  repeated VideoAnnotationProgress annotation_progress = 1;
+}
+
+// Config for SPEECH_TRANSCRIPTION.
+message SpeechTranscriptionConfig {
+  // Required. The language of the supplied audio as a
+  // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+  // Example: "en-US".
+  // See [Language Support](https://cloud.google.com/speech/docs/languages)
+  // for a list of the currently supported language codes.
+  string language_code = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Maximum number of recognition hypotheses to be returned.
+  // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+  // within each `SpeechTranscription`. The server may return fewer than
+  // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
+  // return a maximum of one. If omitted, will return a maximum of one.
+  int32 max_alternatives = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If set to `true`, the server will attempt to filter out
+  // profanities, replacing all but the initial character in each filtered word
+  // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+  // won't be filtered out.
+  bool filter_profanity = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. A means to provide context to assist the speech recognition.
+  repeated SpeechContext speech_contexts = 4
+      [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. If 'true', adds punctuation to recognition result hypotheses.
+ // This feature is only available in select languages. Setting this for + // requests in other languages has no effect at all. The default 'false' value + // does not add punctuation to result hypotheses. NOTE: "This is currently + // offered as an experimental service, complimentary to all users. In the + // future this may be exclusively available as a premium feature." + bool enable_automatic_punctuation = 5 + [(google.api.field_behavior) = OPTIONAL]; + + // Optional. For file formats, such as MXF or MKV, supporting multiple audio + // tracks, specify up to two tracks. Default: track 0. + repeated int32 audio_tracks = 6 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If 'true', enables speaker detection for each recognized word in + // the top alternative of the recognition result using a speaker_tag provided + // in the WordInfo. + // Note: When this is true, we send all the words from the beginning of the + // audio for the top alternative in every consecutive response. + // This is done in order to improve our speaker tags as our models learn to + // identify the speakers in the conversation over time. + bool enable_speaker_diarization = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If set, specifies the estimated number of speakers in the + // conversation. If not set, defaults to '2'. Ignored unless + // enable_speaker_diarization is set to true. + int32 diarization_speaker_count = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. If `true`, the top result includes a list of words and the + // confidence for those words. If `false`, no word-level confidence + // information is returned. The default is `false`. + bool enable_word_confidence = 9 [(google.api.field_behavior) = OPTIONAL]; +} + +// Provides "hints" to the speech recognizer to favor specific words and phrases +// in the results. +message SpeechContext { + // Optional. A list of strings containing words and phrases "hints" so that + // the speech recognition is more likely to recognize them. This can be used + // to improve the accuracy for specific words and phrases, for example, if + // specific commands are typically spoken by the user. This can also be used + // to add additional words to the vocabulary of the recognizer. See + // [usage limits](https://cloud.google.com/speech/limits#content). + repeated string phrases = 1 [(google.api.field_behavior) = OPTIONAL]; +} + +// A speech recognition result corresponding to a portion of the audio. +message SpeechTranscription { + // May contain one or more recognition hypotheses (up to the maximum specified + // in `max_alternatives`). These alternatives are ordered in terms of + // accuracy, with the top (first) alternative being the most probable, as + // ranked by the recognizer. + repeated SpeechRecognitionAlternative alternatives = 1; + + // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) + // language tag of the language in this result. This language code was + // detected to have the most likelihood of being spoken in the audio. + string language_code = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Alternative hypotheses (a.k.a. n-best list). +message SpeechRecognitionAlternative { + // Transcript text representing the words that the user spoke. + string transcript = 1; + + // Output only. The confidence estimate between 0.0 and 1.0. A higher number + // indicates an estimated greater likelihood that the recognized words are + // correct. This field is set only for the top alternative. 
+  // This field is not guaranteed to be accurate and users should not rely on it
+  // to be always provided.
+  // The default of 0.0 is a sentinel value indicating `confidence` was not set.
+  float confidence = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A list of word-specific information for each recognized word.
+  // Note: When `enable_speaker_diarization` is set to true, you will see all
+  // the words from the beginning of the audio.
+  repeated WordInfo words = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Word-specific information for recognized words. Word information is only
+// included in the response when certain request parameters are set, such
+// as `enable_word_time_offsets`.
+message WordInfo {
+  // Time offset relative to the beginning of the audio, and
+  // corresponding to the start of the spoken word. This field is only set if
+  // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
+  // experimental feature and the accuracy of the time offset can vary.
+  google.protobuf.Duration start_time = 1;
+
+  // Time offset relative to the beginning of the audio, and
+  // corresponding to the end of the spoken word. This field is only set if
+  // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
+  // experimental feature and the accuracy of the time offset can vary.
+  google.protobuf.Duration end_time = 2;
+
+  // The word corresponding to this set of information.
+  string word = 3;
+
+  // Output only. The confidence estimate between 0.0 and 1.0. A higher number
+  // indicates an estimated greater likelihood that the recognized words are
+  // correct. This field is set only for the top alternative.
+  // This field is not guaranteed to be accurate and users should not rely on it
+  // to be always provided.
+  // The default of 0.0 is a sentinel value indicating `confidence` was not set.
+  float confidence = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A distinct integer value is assigned for every speaker within
+  // the audio. This field specifies which one of those speakers was detected to
+  // have spoken this word. Value ranges from 1 up to diarization_speaker_count,
+  // and is only set if speaker diarization is enabled.
+  int32 speaker_tag = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// A vertex represents a 2D point in the image.
+// NOTE: the normalized vertex coordinates are relative to the original image
+// and range from 0 to 1.
+message NormalizedVertex {
+  // X coordinate.
+  float x = 1;
+
+  // Y coordinate.
+  float y = 2;
+}
+
+// Normalized bounding polygon for text (that might not be aligned with axis).
+// Contains list of the corner points in clockwise order starting from
+// top-left corner. For example, for a rectangular bounding box:
+// When the text is horizontal it might look like:
+//         0----1
+//         |    |
+//         3----2
+//
+// When it's clockwise rotated 180 degrees around the top-left corner it
+// becomes:
+//         2----3
+//         |    |
+//         1----0
+//
+// and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+// than 0, or greater than 1 due to trigonometric calculations for location of
+// the box.
+message NormalizedBoundingPoly {
+  // Normalized vertices of the bounding polygon.
+  repeated NormalizedVertex vertices = 1;
+}
+
+// Video segment level annotation results for text detection.
+message TextSegment {
+  // Video segment where a text snippet was detected.
+  VideoSegment segment = 1;
+
+  // Confidence for the track of detected text.
+  // confidence observed over all frames where OCR detected text appears.
+  float confidence = 2;
+
+  // Information related to the frames where OCR detected text appears.
+  repeated TextFrame frames = 3;
+}
+
+// Video frame level annotation results for text annotation (OCR).
+// Contains information regarding timestamp and bounding box locations for the
+// frames containing detected OCR text snippets.
+message TextFrame {
+  // Bounding polygon of the detected text for this frame.
+  NormalizedBoundingPoly rotated_bounding_box = 1;
+
+  // Timestamp of this frame.
+  google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations related to one detected OCR text snippet. This will contain the
+// corresponding text, confidence value, and frame level information for each
+// detection.
+message TextAnnotation {
+  // The detected text.
+  string text = 1;
+
+  // All video segments where OCR detected text appears.
+  repeated TextSegment segments = 2;
+}
+
+// Video frame level annotations for object detection and tracking. This field
+// stores per frame location, time offset, and confidence.
+message ObjectTrackingFrame {
+  // The normalized bounding box location of this object track for the frame.
+  NormalizedBoundingBox normalized_bounding_box = 1;
+
+  // The timestamp of the frame in microseconds.
+  google.protobuf.Duration time_offset = 2;
+}
+
+// Annotations corresponding to one tracked object.
+message ObjectTrackingAnnotation {
+  // Different representations of tracking info in non-streaming batch
+  // and streaming modes.
+  oneof track_info {
+    // Non-streaming batch mode ONLY.
+    // Each object track corresponds to one video segment where it appears.
+    VideoSegment segment = 3;
+
+    // Streaming mode ONLY.
+    // In streaming mode, we do not know the end time of a tracked object
+    // before it is completed. Hence, there is no VideoSegment info returned.
+    // Instead, we provide a unique, identifiable integer `track_id` so that
+    // customers can correlate the results of ongoing
+    // `ObjectTrackingAnnotation` messages with the same `track_id` over time.
+    int64 track_id = 5;
+  }
+
+  // Entity to specify the object category that this track is labeled as.
+  Entity entity = 1;
+
+  // Object category's labeling confidence of this track.
+  float confidence = 4;
+
+  // Information corresponding to all frames where this object track appears.
+  // Non-streaming batch mode: there may be one or multiple
+  // ObjectTrackingFrame messages in frames.
+  // Streaming mode: there can only be one ObjectTrackingFrame message in
+  // frames.
+  repeated ObjectTrackingFrame frames = 2;
+}
+
+// Annotation corresponding to one detected, tracked, and recognized logo
+// class.
+message LogoRecognitionAnnotation {
+  // Entity category information to specify the logo class that all the logo
+  // tracks within this LogoRecognitionAnnotation are recognized as.
+  Entity entity = 1;
+
+  // All logo tracks where the recognized logo appears. Each track corresponds
+  // to one logo instance appearing in consecutive frames.
+  repeated Track tracks = 2;
+
+  // All video segments where the recognized logo appears. There might be
+  // multiple instances of the same logo class appearing in one VideoSegment.
+  repeated VideoSegment segments = 3;
+}
+
+// The top-level message sent by the client for the `StreamingAnnotateVideo`
+// method. Multiple `StreamingAnnotateVideoRequest` messages are sent.
+// The first message must only contain a `StreamingVideoConfig` message.
+// All subsequent messages must only contain `input_content` data.
+message StreamingAnnotateVideoRequest {
+  // *Required* The streaming request, which is either a streaming config or
+  // video content.
+  oneof streaming_request {
+    // Provides information to the annotator, specifying how to process the
+    // request. The first `StreamingAnnotateVideoRequest` message must only
+    // contain a `video_config` message.
+    StreamingVideoConfig video_config = 1;
+
+    // The video data to be annotated. Chunks of video data are sequentially
+    // sent in `StreamingAnnotateVideoRequest` messages. Except for the
+    // initial `StreamingAnnotateVideoRequest` message containing only
+    // `video_config`, all subsequent `StreamingAnnotateVideoRequest`
+    // messages must only contain the `input_content` field.
+    // Note: as with all bytes fields, protocol buffers use a pure binary
+    // representation (not base64).
+    bytes input_content = 2;
+  }
+}
+
+// Provides information to the annotator that specifies how to process the
+// request.
+message StreamingVideoConfig {
+  // Config for requested annotation feature.
+  oneof streaming_config {
+    // Config for STREAMING_SHOT_CHANGE_DETECTION.
+    StreamingShotChangeDetectionConfig shot_change_detection_config = 2;
+
+    // Config for STREAMING_LABEL_DETECTION.
+    StreamingLabelDetectionConfig label_detection_config = 3;
+
+    // Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
+    StreamingExplicitContentDetectionConfig explicit_content_detection_config =
+        4;
+
+    // Config for STREAMING_OBJECT_TRACKING.
+    StreamingObjectTrackingConfig object_tracking_config = 5;
+
+    // Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+    StreamingAutomlActionRecognitionConfig automl_action_recognition_config =
+        23;
+
+    // Config for STREAMING_AUTOML_CLASSIFICATION.
+    StreamingAutomlClassificationConfig automl_classification_config = 21;
+
+    // Config for STREAMING_AUTOML_OBJECT_TRACKING.
+    StreamingAutomlObjectTrackingConfig automl_object_tracking_config = 22;
+  }
+
+  // Requested annotation feature.
+  StreamingFeature feature = 1;
+
+  // Streaming storage option. By default, storage is disabled.
+  StreamingStorageConfig storage_config = 30;
+}
+
+// `StreamingAnnotateVideoResponse` is the only message returned to the client
+// by `StreamingAnnotateVideo`. A series of zero or more
+// `StreamingAnnotateVideoResponse` messages is streamed back to the client.
+message StreamingAnnotateVideoResponse {
+  // If set, returns a [google.rpc.Status][google.rpc.Status] message that
+  // specifies the error for the operation.
+  google.rpc.Status error = 1;
+
+  // Streaming annotation results.
+  StreamingVideoAnnotationResults annotation_results = 2;
+
+  // Google Cloud Storage (GCS) URI that stores annotation results of one
+  // streaming session in JSON format.
+  // It is the `annotation_result_storage_directory`
+  // from the request followed by `/cloud_project_number-session_id`.
+  string annotation_results_uri = 3;
+}
+
+// Streaming annotation results corresponding to a portion of the video
+// that is currently being processed.
+message StreamingVideoAnnotationResults {
+  // Shot annotation results. Each shot is represented as a video segment.
+  repeated VideoSegment shot_annotations = 1;
+
+  // Label annotation results.
+  repeated LabelAnnotation label_annotations = 2;
+
+  // Explicit content annotation results.
+  ExplicitContentAnnotation explicit_annotation = 3;
+
+  // Object tracking results.
+  repeated ObjectTrackingAnnotation object_annotations = 4;
+}
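The request/response flow defined above maps directly onto the generated Python client. The following is a minimal, hypothetical sketch (not part of this patch); it assumes the `videointelligence_v1p3beta1` client produced by this generator and a local file named `video.mp4`:

.. code-block:: python

    import io

    from google.cloud import videointelligence_v1p3beta1 as vi


    def stream_requests(path, chunk_size=1024 * 1024):
        # The first request must carry only the configuration.
        config = vi.StreamingVideoConfig(
            feature=vi.StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION,
            shot_change_detection_config=vi.StreamingShotChangeDetectionConfig(),
        )
        yield vi.StreamingAnnotateVideoRequest(video_config=config)
        # Every subsequent request carries only raw video bytes (binary, not base64).
        with io.open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                yield vi.StreamingAnnotateVideoRequest(input_content=chunk)


    client = vi.StreamingVideoIntelligenceServiceClient()
    responses = client.streaming_annotate_video(requests=stream_requests("video.mp4"))
    for response in responses:
        for shot in response.annotation_results.shot_annotations:
            print(shot.start_time_offset, shot.end_time_offset)

Each `StreamingAnnotateVideoResponse` carries partial results for the portion of the video processed so far, as described above.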
+// Config for STREAMING_SHOT_CHANGE_DETECTION.
+message StreamingShotChangeDetectionConfig {}
+
+// Config for STREAMING_LABEL_DETECTION.
+message StreamingLabelDetectionConfig {
+  // Whether the video has been captured from a stationary (i.e. non-moving)
+  // camera. When set to true, it might improve detection accuracy for moving
+  // objects. Default: false.
+  bool stationary_camera = 1;
+}
+
+// Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
+message StreamingExplicitContentDetectionConfig {}
+
+// Config for STREAMING_OBJECT_TRACKING.
+message StreamingObjectTrackingConfig {}
+
+// Config for STREAMING_AUTOML_ACTION_RECOGNITION.
+message StreamingAutomlActionRecognitionConfig {
+  // Resource name of AutoML model.
+  // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+  string model_name = 1;
+}
+
+// Config for STREAMING_AUTOML_CLASSIFICATION.
+message StreamingAutomlClassificationConfig {
+  // Resource name of AutoML model.
+  // Format:
+  // `projects/{project_number}/locations/{location_id}/models/{model_id}`
+  string model_name = 1;
+}
+
+// Config for STREAMING_AUTOML_OBJECT_TRACKING.
+message StreamingAutomlObjectTrackingConfig {
+  // Resource name of AutoML model.
+  // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
+  string model_name = 1;
+}
+
+// Config for the streaming storage option.
+message StreamingStorageConfig {
+  // Enable streaming storage. Default: false.
+  bool enable_storage_annotation_result = 1;
+
+  // Cloud Storage URI to store all annotation results for one client. The
+  // client should specify this field as the top-level storage directory.
+  // Annotation results of different sessions will be put into different
+  // sub-directories denoted by `project_name` and `session_id`. All
+  // sub-directories will be auto-generated by the program and made accessible
+  // to the client in the response proto. URIs must be specified in the
+  // following format: `gs://bucket-id/object-id`. `bucket-id` should be a
+  // valid Cloud Storage bucket created by the client, and the bucket
+  // permissions must also be configured properly. `object-id` can be an
+  // arbitrary string that makes sense to the client.
+  // Other URI formats will return an error and cause the Cloud Storage write
+  // to fail.
+  string annotation_result_storage_directory = 3;
+}
diff --git a/packages/google-cloud-videointelligence/noxfile.py b/packages/google-cloud-videointelligence/noxfile.py
index 78d077a56bd1..9efa42e493b7 100644
--- a/packages/google-cloud-videointelligence/noxfile.py
+++ b/packages/google-cloud-videointelligence/noxfile.py
@@ -28,7 +28,7 @@
 DEFAULT_PYTHON_VERSION = "3.8"
 SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
 @nox.session(python=DEFAULT_PYTHON_VERSION)
diff --git a/packages/google-cloud-videointelligence/samples/analyze/README.rst b/packages/google-cloud-videointelligence/samples/analyze/README.rst
deleted file mode 100644
index 05a1557c8c83..000000000000
--- a/packages/google-cloud-videointelligence/samples/analyze/README.rst
+++ /dev/null
@@ -1,195 +0,0 @@
-.. This file is automatically generated. Do not edit this file directly.
-
-Google Cloud Video Intelligence API Python Samples
-===============================================================================
-
-..
image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/analyze/README.rst - - -This directory contains samples for Google Cloud Video Intelligence API. `Google Cloud Video Intelligence API`_ allows developers to easily integrate feature detection in video. - - - - -.. _Google Cloud Video Intelligence API: https://cloud.google.com/video-intelligence/docs - - - - - -Setup -------------------------------------------------------------------------------- - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - -Samples -------------------------------------------------------------------------------- - -analyze -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/analyze/analyze.py,video/cloud-client/analyze/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python analyze.py - - usage: analyze.py [-h] - {labels,labels_file,explicit_content,shots,transcribe,text_gcs,text_file,objects_gcs,objects_file} - ... - - This application demonstrates label detection, - explicit content, and shot change detection using the Google Cloud API. - - Usage Examples: - - python analyze.py labels gs://cloud-samples-data/video/chicago.mp4 - python analyze.py labels_file resources/cat.mp4 - python analyze.py shots gs://cloud-samples-data/video/gbikes_dinosaur.mp4 - python analyze.py explicit_content gs://cloud-samples-data/video/gbikes_dinosaur.mp4 - python analyze.py text_gcs gs://cloud-samples-data/video/googlework_tiny.mp4 - python analyze.py text_file resources/googlework_tiny.mp4 - python analyze.py objects_gcs gs://cloud-samples-data/video/cat.mp4 - python analyze.py objects_file resources/cat.mp4 - - positional arguments: - {labels,labels_file,explicit_content,shots,transcribe,text_gcs,text_file,objects_gcs,objects_file} - labels Detects labels given a GCS path. - labels_file Detect labels given a file path. - explicit_content Detects explicit content from the GCS path to a video. - shots Detects camera shot changes. 
- transcribe Transcribe speech from a video stored on GCS. - text_gcs Detect text in a video stored on GCS. - text_file Detect text in a local video. - objects_gcs Object tracking in a video stored on GCS. - objects_file Object tracking in a local video. - - optional arguments: - -h, --help show this help message and exit - - - -beta samples -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/analyze/beta_snippets.py,video/cloud-client/analyze/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python beta_snippets.py - - usage: beta_snippets.py [-h] - {transcription,video-text-gcs,video-text,track-objects-gcs,track-objects,streaming-labels,streaming-shot-change,streaming-objects,streaming-explicit-content,streaming-annotation-storage,streaming-automl-classification} - ... - - This application demonstrates speech transcription using the - Google Cloud API. - - Usage Examples: - python beta_snippets.py transcription gs://python-docs-samples-tests/video/googlework_tiny.mp4 - - python beta_snippets.py video-text-gcs gs://python-docs-samples-tests/video/googlework_tiny.mp4 - - python beta_snippets.py track-objects resources/cat.mp4 - - python beta_snippets.py streaming-labels resources/cat.mp4 - - python beta_snippets.py streaming-shot-change resources/cat.mp4 - - python beta_snippets.py streaming-objects resources/cat.mp4 - - python beta_snippets.py streaming-explicit-content resources/cat.mp4 - - python beta_snippets.py streaming-annotation-storage resources/cat.mp4 gs://mybucket/myfolder - - python beta_snippets.py streaming-automl-classification resources/cat.mp4 $PROJECT_ID $MODEL_ID - - positional arguments: - {transcription,video-text-gcs,video-text,track-objects-gcs,track-objects,streaming-labels,streaming-shot-change,streaming-objects,streaming-explicit-content,streaming-annotation-storage,streaming-automl-classification} - transcription Transcribe speech from a video stored on GCS. - video-text-gcs Detect text in a video stored on GCS. - video-text Detect text in a local video. - track-objects-gcs Object Tracking. - track-objects Object Tracking. - streaming-labels - streaming-shot-change - streaming-objects - streaming-explicit-content - streaming-annotation-storage - streaming-automl-classification - - optional arguments: - -h, --help show this help message and exit - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. _browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - -.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/packages/google-cloud-videointelligence/samples/analyze/noxfile.py b/packages/google-cloud-videointelligence/samples/analyze/noxfile.py index ab2c49227c3b..b90eef00f2d9 100644 --- a/packages/google-cloud-videointelligence/samples/analyze/noxfile.py +++ b/packages/google-cloud-videointelligence/samples/analyze/noxfile.py @@ -37,25 +37,28 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints # All new samples should feature them - "enforce_type_hints": False, + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -70,12 +73,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -84,7 +87,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -133,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG["enforce_type_hints"]: + if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -142,11 +145,9 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) - - # # Black # @@ -159,7 +160,6 @@ def blacken(session): session.run("black", *python_files) - # # Sample Tests # @@ -199,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/packages/google-cloud-videointelligence/samples/labels/README.rst b/packages/google-cloud-videointelligence/samples/labels/README.rst deleted file mode 100644 index 1dcc76887147..000000000000 --- a/packages/google-cloud-videointelligence/samples/labels/README.rst +++ /dev/null @@ -1,135 +0,0 @@ - -.. This file is automatically generated. 
Do not edit this file directly. - -Google Cloud Video Intelligence API Python Samples -=============================================================================== - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/labels/README.rst - - -This directory contains samples for Google Cloud Video Intelligence API. `Google Cloud Video Intelligence API`_ allows developers to easily integrate feature detection in video. - - - - -.. _Google Cloud Video Intelligence API: https://cloud.google.com/video-intelligence/docs - - -Setup -------------------------------------------------------------------------------- - - - -Authentication -++++++++++++++ - -This sample requires you to have authentication setup. Refer to the -`Authentication Getting Started Guide`_ for instructions on setting up -credentials for applications. - -.. _Authentication Getting Started Guide: - https://cloud.google.com/docs/authentication/getting-started - - - - -Install Dependencies -++++++++++++++++++++ - -#. Clone python-docs-samples and change directory to the sample directory you want to use. - - .. code-block:: bash - - $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git - -#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. - - .. _Python Development Environment Setup Guide: - https://cloud.google.com/python/setup - -#. Create a virtualenv. Samples are compatible with Python 3.6+. - - .. code-block:: bash - - $ virtualenv env - $ source env/bin/activate - -#. Install the dependencies needed to run the samples. - - .. code-block:: bash - - $ pip install -r requirements.txt - -.. _pip: https://pip.pypa.io/ -.. _virtualenv: https://virtualenv.pypa.io/ - - - - - - -Samples -------------------------------------------------------------------------------- - - -labels -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -.. image:: https://gstatic.com/cloudssh/images/open-btn.png - :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/labels/labels.py,video/cloud-client/labels/README.rst - - - - -To run this sample: - -.. code-block:: bash - - $ python labels.py - - - usage: labels.py [-h] path - - This application demonstrates how to detect labels from a video - based on the image content with the Google Cloud Video Intelligence - API. - - For more information, check out the documentation at - https://cloud.google.com/videointelligence/docs. - - Usage Example: - - python labels.py gs://cloud-ml-sandbox/video/chicago.mp4 - - positional arguments: - path GCS file path for label detection. - - optional arguments: - -h, --help show this help message and exit - - - - - - - - - -The client library -------------------------------------------------------------------------------- - -This sample uses the `Google Cloud Client Library for Python`_. -You can read the documentation for more details on API usage and use GitHub -to `browse the source`_ and `report issues`_. - -.. _Google Cloud Client Library for Python: - https://googlecloudplatform.github.io/google-cloud-python/ -.. 
_browse the source: - https://github.com/GoogleCloudPlatform/google-cloud-python -.. _report issues: - https://github.com/GoogleCloudPlatform/google-cloud-python/issues - - - -.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/packages/google-cloud-videointelligence/samples/labels/noxfile.py b/packages/google-cloud-videointelligence/samples/labels/noxfile.py index ab2c49227c3b..b90eef00f2d9 100644 --- a/packages/google-cloud-videointelligence/samples/labels/noxfile.py +++ b/packages/google-cloud-videointelligence/samples/labels/noxfile.py @@ -37,25 +37,28 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints # All new samples should feature them - "enforce_type_hints": False, + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -70,12 +73,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -84,7 +87,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -133,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG["enforce_type_hints"]: + if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -142,11 +145,9 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." 
] session.run("flake8", *args) - - # # Black # @@ -159,7 +160,6 @@ def blacken(session): session.run("black", *python_files) - # # Sample Tests # @@ -199,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/packages/google-cloud-videointelligence/samples/quickstart/noxfile.py b/packages/google-cloud-videointelligence/samples/quickstart/noxfile.py index ab2c49227c3b..b90eef00f2d9 100644 --- a/packages/google-cloud-videointelligence/samples/quickstart/noxfile.py +++ b/packages/google-cloud-videointelligence/samples/quickstart/noxfile.py @@ -37,25 +37,28 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints # All new samples should feature them - "enforce_type_hints": False, + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -70,12 +73,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret @@ -84,7 +87,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -133,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG["enforce_type_hints"]: + if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -142,11 +145,9 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." 
] session.run("flake8", *args) - - # # Black # @@ -159,7 +160,6 @@ def blacken(session): session.run("black", *python_files) - # # Sample Tests # @@ -199,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/packages/google-cloud-videointelligence/samples/shotchange/noxfile.py b/packages/google-cloud-videointelligence/samples/shotchange/noxfile.py index e38c11b7a7e2..b90eef00f2d9 100644 --- a/packages/google-cloud-videointelligence/samples/shotchange/noxfile.py +++ b/packages/google-cloud-videointelligence/samples/shotchange/noxfile.py @@ -37,25 +37,28 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints # All new samples should feature them - "enforce_type_hints": False, + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -70,13 +73,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG["envs"]) - + ret.update(TEST_CONFIG['envs']) return ret @@ -85,7 +87,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -134,7 +136,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG["enforce_type_hints"]: + if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -143,11 +145,9 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." 
] session.run("flake8", *args) - - # # Black # @@ -160,7 +160,6 @@ def blacken(session): session.run("black", *python_files) - # # Sample Tests # @@ -200,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1_keywords.py b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1_keywords.py new file mode 100644 index 000000000000..cda96fc572ce --- /dev/null +++ b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1_keywords.py @@ -0,0 +1,179 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class videointelligenceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'annotate_video': ('features', 'input_uri', 'input_content', 'video_context', 'output_uri', 'location_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. 
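+                # Illustration (hypothetical call): annotate_video(feats, input_uri=uri)
+                # arrives here with args == [feats] and kwargs == [input_uri=uri]
+                # (any retry/timeout/metadata were peeled into ctrl_kwargs), so the
+                # zip below emits the pairs ('features', feats) and ('input_uri', uri).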
+ for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=videointelligenceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the videointelligence client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1beta2_keywords.py b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1beta2_keywords.py new file mode 100644 index 000000000000..cda96fc572ce --- /dev/null +++ b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1beta2_keywords.py @@ -0,0 +1,179 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class videointelligenceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'annotate_video': ('features', 'input_uri', 'input_content', 'video_context', 'output_uri', 'location_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=videointelligenceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. 
+ with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the videointelligence client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p1beta1_keywords.py b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p1beta1_keywords.py new file mode 100644 index 000000000000..cda96fc572ce --- /dev/null +++ b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p1beta1_keywords.py @@ -0,0 +1,179 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class videointelligenceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'annotate_video': ('features', 'input_uri', 'input_content', 'video_context', 'output_uri', 'location_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=videointelligenceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the videointelligence client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. 
+ Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p2beta1_keywords.py b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p2beta1_keywords.py new file mode 100644 index 000000000000..cda96fc572ce --- /dev/null +++ b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p2beta1_keywords.py @@ -0,0 +1,179 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class videointelligenceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'annotate_video': ('features', 'input_uri', 'input_content', 'video_context', 'output_uri', 'location_id', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
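+        # For example (hypothetical call): annotate_video(feats, input_uri=uri)
+        # partitions into args == [feats] and kwargs == [input_uri=uri]; the
+        # positional feats is later paired with the first declared parameter,
+        # 'features'.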
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=videointelligenceCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the videointelligence client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p3beta1_keywords.py b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p3beta1_keywords.py new file mode 100644 index 000000000000..9a1b69bab84c --- /dev/null +++ b/packages/google-cloud-videointelligence/scripts/fixup_videointelligence_v1p3beta1_keywords.py @@ -0,0 +1,180 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class videointelligenceCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'annotate_video': ('features', 'input_uri', 'input_content', 'video_context', 'output_uri', 'location_id', ), + 'streaming_annotate_video': ('video_config', 'input_content', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=videointelligenceCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing method calls in the files.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the videointelligence client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates on a best-effort basis when converting positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include:
+      A) * or ** expansion in a method call.
+      B) Calls via a function or method alias (including free function calls).
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically).
+
+      These all constitute false negatives. The tool can also produce false
+      positives when a method on an unrelated object shares a name with an API
+      method.
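+
+      Example invocation (illustrative only; the directory names are
+      placeholders):
+        python3 fixup_videointelligence_v1p3beta1_keywords.py --input-directory ./my_src --output-directory ./my_src_fixed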
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/packages/google-cloud-videointelligence/synth.metadata b/packages/google-cloud-videointelligence/synth.metadata index 35b127b8d2fd..b34a856b1c97 100644 --- a/packages/google-cloud-videointelligence/synth.metadata +++ b/packages/google-cloud-videointelligence/synth.metadata @@ -3,16 +3,16 @@ { "git": { "name": ".", - "remote": "git@github.com:googleapis/python-videointelligence.git", - "sha": "128b6c8cc9fcf6da538f0fd0adcd5dd885431ccb" + "remote": "https://github.com/googleapis/python-videointelligence.git", + "sha": "1a68219142ed23c434417808da9fcdca3812280d" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "5fdb685a684269e07c10c7518372eb5d7b6bc0a9", - "internalRef": "342906697" + "sha": "468a94a87b9f80d22d1f5e3076d5bab73a5c996f", + "internalRef": "346664095" } }, { @@ -78,7 +78,6 @@ } ], "generatedFiles": [ - ".coveragerc", ".flake8", ".github/CONTRIBUTING.md", ".github/ISSUE_TEMPLATE/bug_report.md", @@ -130,92 +129,106 @@ "docs/_static/custom.css", "docs/_templates/layout.html", "docs/conf.py", - "docs/gapic/v1/api.rst", - "docs/gapic/v1/types.rst", - "docs/gapic/v1beta2/api.rst", - "docs/gapic/v1beta2/types.rst", - "docs/gapic/v1p1beta1/api.rst", - "docs/gapic/v1p1beta1/types.rst", - "docs/gapic/v1p2beta1/api.rst", - "docs/gapic/v1p2beta1/types.rst", - "docs/gapic/v1p3beta1/api.rst", - "docs/gapic/v1p3beta1/types.rst", "docs/multiprocessing.rst", - "google/__init__.py", - "google/cloud/__init__.py", - "google/cloud/videointelligence.py", + "docs/videointelligence_v1/services.rst", + "docs/videointelligence_v1/types.rst", + "docs/videointelligence_v1beta2/services.rst", + "docs/videointelligence_v1beta2/types.rst", + "docs/videointelligence_v1p1beta1/services.rst", + "docs/videointelligence_v1p1beta1/types.rst", + "docs/videointelligence_v1p2beta1/services.rst", + "docs/videointelligence_v1p2beta1/types.rst", + "docs/videointelligence_v1p3beta1/services.rst", + "docs/videointelligence_v1p3beta1/types.rst", + "google/cloud/videointelligence/__init__.py", + "google/cloud/videointelligence/py.typed", "google/cloud/videointelligence_v1/__init__.py", - "google/cloud/videointelligence_v1/gapic/__init__.py", - "google/cloud/videointelligence_v1/gapic/enums.py", - "google/cloud/videointelligence_v1/gapic/transports/__init__.py", - "google/cloud/videointelligence_v1/gapic/transports/video_intelligence_service_grpc_transport.py", - "google/cloud/videointelligence_v1/gapic/video_intelligence_service_client.py", - "google/cloud/videointelligence_v1/gapic/video_intelligence_service_client_config.py", - 
"google/cloud/videointelligence_v1/proto/__init__.py", "google/cloud/videointelligence_v1/proto/video_intelligence.proto", - "google/cloud/videointelligence_v1/proto/video_intelligence_pb2.py", - "google/cloud/videointelligence_v1/proto/video_intelligence_pb2_grpc.py", - "google/cloud/videointelligence_v1/types.py", + "google/cloud/videointelligence_v1/py.typed", + "google/cloud/videointelligence_v1/services/__init__.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/__init__.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/async_client.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/client.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/transports/__init__.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/transports/base.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc.py", + "google/cloud/videointelligence_v1/services/video_intelligence_service/transports/grpc_asyncio.py", + "google/cloud/videointelligence_v1/types/__init__.py", + "google/cloud/videointelligence_v1/types/video_intelligence.py", "google/cloud/videointelligence_v1beta2/__init__.py", - "google/cloud/videointelligence_v1beta2/gapic/__init__.py", - "google/cloud/videointelligence_v1beta2/gapic/enums.py", - "google/cloud/videointelligence_v1beta2/gapic/transports/__init__.py", - "google/cloud/videointelligence_v1beta2/gapic/transports/video_intelligence_service_grpc_transport.py", - "google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client.py", - "google/cloud/videointelligence_v1beta2/gapic/video_intelligence_service_client_config.py", - "google/cloud/videointelligence_v1beta2/proto/__init__.py", "google/cloud/videointelligence_v1beta2/proto/video_intelligence.proto", - "google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2.py", - "google/cloud/videointelligence_v1beta2/proto/video_intelligence_pb2_grpc.py", - "google/cloud/videointelligence_v1beta2/types.py", + "google/cloud/videointelligence_v1beta2/py.typed", + "google/cloud/videointelligence_v1beta2/services/__init__.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/__init__.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/async_client.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/client.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/__init__.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/base.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc.py", + "google/cloud/videointelligence_v1beta2/services/video_intelligence_service/transports/grpc_asyncio.py", + "google/cloud/videointelligence_v1beta2/types/__init__.py", + "google/cloud/videointelligence_v1beta2/types/video_intelligence.py", "google/cloud/videointelligence_v1p1beta1/__init__.py", - "google/cloud/videointelligence_v1p1beta1/gapic/__init__.py", - "google/cloud/videointelligence_v1p1beta1/gapic/enums.py", - "google/cloud/videointelligence_v1p1beta1/gapic/transports/__init__.py", - "google/cloud/videointelligence_v1p1beta1/gapic/transports/video_intelligence_service_grpc_transport.py", - "google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client.py", - 
"google/cloud/videointelligence_v1p1beta1/gapic/video_intelligence_service_client_config.py", - "google/cloud/videointelligence_v1p1beta1/proto/__init__.py", "google/cloud/videointelligence_v1p1beta1/proto/video_intelligence.proto", - "google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2.py", - "google/cloud/videointelligence_v1p1beta1/proto/video_intelligence_pb2_grpc.py", - "google/cloud/videointelligence_v1p1beta1/types.py", + "google/cloud/videointelligence_v1p1beta1/py.typed", + "google/cloud/videointelligence_v1p1beta1/services/__init__.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/__init__.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/async_client.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/client.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/__init__.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/base.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc.py", + "google/cloud/videointelligence_v1p1beta1/services/video_intelligence_service/transports/grpc_asyncio.py", + "google/cloud/videointelligence_v1p1beta1/types/__init__.py", + "google/cloud/videointelligence_v1p1beta1/types/video_intelligence.py", "google/cloud/videointelligence_v1p2beta1/__init__.py", - "google/cloud/videointelligence_v1p2beta1/gapic/__init__.py", - "google/cloud/videointelligence_v1p2beta1/gapic/enums.py", - "google/cloud/videointelligence_v1p2beta1/gapic/transports/__init__.py", - "google/cloud/videointelligence_v1p2beta1/gapic/transports/video_intelligence_service_grpc_transport.py", - "google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client.py", - "google/cloud/videointelligence_v1p2beta1/gapic/video_intelligence_service_client_config.py", - "google/cloud/videointelligence_v1p2beta1/proto/__init__.py", "google/cloud/videointelligence_v1p2beta1/proto/video_intelligence.proto", - "google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2.py", - "google/cloud/videointelligence_v1p2beta1/proto/video_intelligence_pb2_grpc.py", - "google/cloud/videointelligence_v1p2beta1/types.py", + "google/cloud/videointelligence_v1p2beta1/py.typed", + "google/cloud/videointelligence_v1p2beta1/services/__init__.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/__init__.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/async_client.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/client.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/__init__.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/base.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc.py", + "google/cloud/videointelligence_v1p2beta1/services/video_intelligence_service/transports/grpc_asyncio.py", + "google/cloud/videointelligence_v1p2beta1/types/__init__.py", + "google/cloud/videointelligence_v1p2beta1/types/video_intelligence.py", "google/cloud/videointelligence_v1p3beta1/__init__.py", - "google/cloud/videointelligence_v1p3beta1/gapic/__init__.py", - "google/cloud/videointelligence_v1p3beta1/gapic/enums.py", - "google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client.py", - 
"google/cloud/videointelligence_v1p3beta1/gapic/streaming_video_intelligence_service_client_config.py", - "google/cloud/videointelligence_v1p3beta1/gapic/transports/__init__.py", - "google/cloud/videointelligence_v1p3beta1/gapic/transports/streaming_video_intelligence_service_grpc_transport.py", - "google/cloud/videointelligence_v1p3beta1/gapic/transports/video_intelligence_service_grpc_transport.py", - "google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client.py", - "google/cloud/videointelligence_v1p3beta1/gapic/video_intelligence_service_client_config.py", - "google/cloud/videointelligence_v1p3beta1/proto/__init__.py", "google/cloud/videointelligence_v1p3beta1/proto/video_intelligence.proto", - "google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2.py", - "google/cloud/videointelligence_v1p3beta1/proto/video_intelligence_pb2_grpc.py", - "google/cloud/videointelligence_v1p3beta1/types.py", + "google/cloud/videointelligence_v1p3beta1/py.typed", + "google/cloud/videointelligence_v1p3beta1/services/__init__.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/__init__.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/async_client.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/client.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/__init__.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/base.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc.py", + "google/cloud/videointelligence_v1p3beta1/services/streaming_video_intelligence_service/transports/grpc_asyncio.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/__init__.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/async_client.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/client.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/__init__.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/base.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc.py", + "google/cloud/videointelligence_v1p3beta1/services/video_intelligence_service/transports/grpc_asyncio.py", + "google/cloud/videointelligence_v1p3beta1/types/__init__.py", + "google/cloud/videointelligence_v1p3beta1/types/video_intelligence.py", + "mypy.ini", "noxfile.py", "renovate.json", "samples/AUTHORING_GUIDE.md", "samples/CONTRIBUTING.md", - "samples/analyze/README.rst", "samples/analyze/noxfile.py", - "samples/labels/README.rst", "samples/labels/noxfile.py", + "samples/quickstart/noxfile.py", + "samples/shotchange/noxfile.py", "scripts/decrypt-secrets.sh", + "scripts/fixup_videointelligence_v1_keywords.py", + "scripts/fixup_videointelligence_v1beta2_keywords.py", + "scripts/fixup_videointelligence_v1p1beta1_keywords.py", + "scripts/fixup_videointelligence_v1p2beta1_keywords.py", + "scripts/fixup_videointelligence_v1p3beta1_keywords.py", "scripts/readme-gen/readme_gen.py", "scripts/readme-gen/templates/README.tmpl.rst", "scripts/readme-gen/templates/auth.tmpl.rst", @@ -224,11 +237,16 @@ "scripts/readme-gen/templates/install_portaudio.tmpl.rst", "setup.cfg", "testing/.gitignore", - 
"tests/unit/gapic/v1/test_video_intelligence_service_client_v1.py", - "tests/unit/gapic/v1beta2/test_video_intelligence_service_client_v1beta2.py", - "tests/unit/gapic/v1p1beta1/test_video_intelligence_service_client_v1p1beta1.py", - "tests/unit/gapic/v1p2beta1/test_video_intelligence_service_client_v1p2beta1.py", - "tests/unit/gapic/v1p3beta1/test_streaming_video_intelligence_service_client_v1p3beta1.py", - "tests/unit/gapic/v1p3beta1/test_video_intelligence_service_client_v1p3beta1.py" + "tests/unit/gapic/videointelligence_v1/__init__.py", + "tests/unit/gapic/videointelligence_v1/test_video_intelligence_service.py", + "tests/unit/gapic/videointelligence_v1beta2/__init__.py", + "tests/unit/gapic/videointelligence_v1beta2/test_video_intelligence_service.py", + "tests/unit/gapic/videointelligence_v1p1beta1/__init__.py", + "tests/unit/gapic/videointelligence_v1p1beta1/test_video_intelligence_service.py", + "tests/unit/gapic/videointelligence_v1p2beta1/__init__.py", + "tests/unit/gapic/videointelligence_v1p2beta1/test_video_intelligence_service.py", + "tests/unit/gapic/videointelligence_v1p3beta1/__init__.py", + "tests/unit/gapic/videointelligence_v1p3beta1/test_streaming_video_intelligence_service.py", + "tests/unit/gapic/videointelligence_v1p3beta1/test_video_intelligence_service.py" ] } \ No newline at end of file