<html><body>
<style>
body, h1, h2, h3, div, span, p, pre, a {
margin: 0;
padding: 0;
border: 0;
font-weight: inherit;
font-style: inherit;
font-size: 100%;
font-family: inherit;
vertical-align: baseline;
}
body {
font-size: 13px;
padding: 1em;
}
h1 {
font-size: 26px;
margin-bottom: 1em;
}
h2 {
font-size: 24px;
margin-bottom: 1em;
}
h3 {
font-size: 20px;
margin-bottom: 1em;
margin-top: 1em;
}
pre, code {
line-height: 1.5;
font-family: Monaco, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', 'Lucida Console', monospace;
}
pre {
margin-top: 0.5em;
}
h1, h2, h3, p {
font-family: Arial, sans-serif;
}
h1, h2, h3 {
border-bottom: solid #CCC 1px;
}
.toc_element {
margin-top: 0.5em;
}
.firstline {
margin-left: 2em;
}
.method {
margin-top: 1em;
border: solid 1px #CCC;
padding: 1em;
background: #EEE;
}
.details {
font-weight: bold;
font-size: 14px;
}
</style>
<h1><a href="videointelligence_v1beta2.html">Cloud Video Intelligence API</a> . <a href="videointelligence_v1beta2.videos.html">videos</a></h1>
<h2>Instance Methods</h2>
<p class="toc_element">
<code><a href="#annotate">annotate(body=None, x__xgafv=None)</a></code></p>
<p class="firstline">Performs asynchronous video annotation. Progress and results can be</p>
<h3>Method Details</h3>
<div class="method">
<code class="details" id="annotate">annotate(body=None, x__xgafv=None)</code>
<pre>Performs asynchronous video annotation. Progress and results can be
retrieved through the `google.longrunning.Operations` interface.
`Operation.metadata` contains `AnnotateVideoProgress` (progress).
`Operation.response` contains `AnnotateVideoResponse` (results).
Args:
body: object, The request body.
The object takes the form of:
{ # Video annotation request.
&quot;outputUri&quot;: &quot;A String&quot;, # Optional. Location where the output (in JSON format) should be stored.
# Currently, only [Cloud Storage](https://cloud.google.com/storage/)
# URIs are supported. These must be specified in the following format:
# `gs://bucket-id/object-id` (other URI formats return
# google.rpc.Code.INVALID_ARGUMENT). For more information, see
# [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
&quot;inputContent&quot;: &quot;A String&quot;, # The video data bytes.
# If unset, the input video(s) should be specified via the `input_uri`.
# If set, `input_uri` must be unset.
&quot;videoContext&quot;: { # Video context and/or feature-specific parameters. # Additional video context and/or feature-specific parameters.
&quot;objectTrackingConfig&quot;: { # Config for OBJECT_TRACKING. # Config for OBJECT_TRACKING.
&quot;model&quot;: &quot;A String&quot;, # Model to use for object tracking.
# Supported values: &quot;builtin/stable&quot; (the default if unset) and
# &quot;builtin/latest&quot;.
},
&quot;textDetectionConfig&quot;: { # Config for TEXT_DETECTION. # Config for TEXT_DETECTION.
&quot;languageHints&quot;: [ # A language hint can be specified if the language to be detected is
# known a priori; it can increase the accuracy of the detection. The
# language hint must be a language code in BCP-47 format.
#
# Automatic language detection is performed if no hint is provided.
&quot;A String&quot;,
],
&quot;model&quot;: &quot;A String&quot;, # Model to use for text detection.
# Supported values: &quot;builtin/stable&quot; (the default if unset) and
# &quot;builtin/latest&quot;.
},
&quot;labelDetectionConfig&quot;: { # Config for LABEL_DETECTION. # Config for LABEL_DETECTION.
&quot;videoConfidenceThreshold&quot;: 3.14, # The confidence threshold used to filter labels from video-level
# and shot-level detections. If not set, it defaults to 0.3. The valid
# range for this threshold is [0.1, 0.9]; any value set outside of this
# range will be clipped.
# Note: For best results, keep the default threshold. The default
# threshold is updated every time a new model is released.
&quot;stationaryCamera&quot;: True or False, # Whether the video has been shot from a stationary (i.e., non-moving)
# camera. When set to true, this might improve detection accuracy for
# moving objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
&quot;frameConfidenceThreshold&quot;: 3.14, # The confidence threshold used to filter labels from frame-level
# detection. If not set, it defaults to 0.4. The valid range for this
# threshold is [0.1, 0.9]; any value set outside of this range will be
# clipped.
# Note: For best results, keep the default threshold. The default
# threshold is updated every time a new model is released.
&quot;model&quot;: &quot;A String&quot;, # Model to use for label detection.
# Supported values: &quot;builtin/stable&quot; (the default if unset) and
# &quot;builtin/latest&quot;.
&quot;labelDetectionMode&quot;: &quot;A String&quot;, # What labels should be detected with LABEL_DETECTION, in addition to
# video-level labels or segment-level labels.
# If unspecified, defaults to `SHOT_MODE`.
},
&quot;speechTranscriptionConfig&quot;: { # Config for SPEECH_TRANSCRIPTION. # Config for SPEECH_TRANSCRIPTION.
&quot;diarizationSpeakerCount&quot;: 42, # Optional. If set, specifies the estimated number of speakers in the conversation.
# If not set, defaults to 2.
# Ignored unless `enable_speaker_diarization` is set to true.
&quot;enableWordConfidence&quot;: True or False, # Optional. If `true`, the top result includes a list of words and the
# confidence for those words. If `false`, no word-level confidence
# information is returned. The default is `false`.
&quot;maxAlternatives&quot;: 42, # Optional. Maximum number of recognition hypotheses to be returned.
# Specifically, the maximum number of `SpeechRecognitionAlternative` messages
# within each `SpeechTranscription`. The server may return fewer than
# `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
# return a maximum of one. If omitted, will return a maximum of one.
&quot;enableAutomaticPunctuation&quot;: True or False, # Optional. If `true`, adds punctuation to recognition result hypotheses.
# This feature is only available in select languages; setting this for
# requests in other languages has no effect. The default value `false`
# does not add punctuation to result hypotheses. Note: this is currently
# offered as an experimental service, complimentary to all users. In the
# future it may be exclusively available as a premium feature.
&quot;languageCode&quot;: &quot;A String&quot;, # Required. The language of the supplied audio as a
# [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
# Example: &quot;en-US&quot;.
# See [Language Support](https://cloud.google.com/speech/docs/languages)
# for a list of the currently supported language codes.
&quot;filterProfanity&quot;: True or False, # Optional. If set to `true`, the server will attempt to filter out
# profanities, replacing all but the initial character in each filtered word
# with asterisks, e.g. &quot;f***&quot;. If set to `false` or omitted, profanities
# won&#x27;t be filtered out.
&quot;audioTracks&quot;: [ # Optional. For file formats that support multiple audio tracks, such
# as MXF or MKV, specify up to two tracks. Default: track 0.
42,
],
&quot;speechContexts&quot;: [ # Optional. A means to provide context to assist the speech recognition.
{ # Provides &quot;hints&quot; to the speech recognizer to favor specific words and phrases
# in the results.
&quot;phrases&quot;: [ # Optional. A list of strings containing words and phrases &quot;hints&quot; so that
# the speech recognition is more likely to recognize them. This can be used
# to improve the accuracy for specific words and phrases, for example, if
# specific commands are typically spoken by the user. This can also be used
# to add additional words to the vocabulary of the recognizer. See
# [usage limits](https://cloud.google.com/speech/limits#content).
&quot;A String&quot;,
],
},
],
&quot;enableSpeakerDiarization&quot;: True or False, # Optional. If &#x27;true&#x27;, enables speaker detection for each recognized word in
# the top alternative of the recognition result using a speaker_tag provided
# in the WordInfo.
# Note: When this is true, we send all the words from the beginning of the
# audio for the top alternative in every consecutive response.
# This is done in order to improve our speaker tags as our models learn to
# identify the speakers in the conversation over time.
},
&quot;explicitContentDetectionConfig&quot;: { # Config for EXPLICIT_CONTENT_DETECTION. # Config for EXPLICIT_CONTENT_DETECTION.
&quot;model&quot;: &quot;A String&quot;, # Model to use for explicit content detection.
# Supported values: &quot;builtin/stable&quot; (the default if unset) and
# &quot;builtin/latest&quot;.
},
&quot;shotChangeDetectionConfig&quot;: { # Config for SHOT_CHANGE_DETECTION. # Config for SHOT_CHANGE_DETECTION.
&quot;model&quot;: &quot;A String&quot;, # Model to use for shot change detection.
# Supported values: &quot;builtin/stable&quot; (the default if unset) and
# &quot;builtin/latest&quot;.
},
&quot;segments&quot;: [ # Video segments to annotate. The segments may overlap and are not required
# to be contiguous or span the whole video. If unspecified, each video is
# treated as a single segment.
{ # Video segment.
&quot;endTimeOffset&quot;: &quot;A String&quot;, # Time-offset, relative to the beginning of the video,
# corresponding to the end of the segment (inclusive).
&quot;startTimeOffset&quot;: &quot;A String&quot;, # Time-offset, relative to the beginning of the video,
# corresponding to the start of the segment (inclusive).
},
],
},
&quot;features&quot;: [ # Required. Requested video annotation features.
&quot;A String&quot;,
],
&quot;locationId&quot;: &quot;A String&quot;, # Optional. Cloud region where annotation should take place. Supported cloud
# regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
# region is specified, the region will be determined based on video file
# location.
&quot;inputUri&quot;: &quot;A String&quot;, # Input video location. Currently, only
# [Cloud Storage](https://cloud.google.com/storage/) URIs are
# supported. URIs must be specified in the following format:
# `gs://bucket-id/object-id` (other URI formats return
# google.rpc.Code.INVALID_ARGUMENT). For more information, see
# [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
# To identify multiple videos, a video URI may include wildcards in the
# `object-id`. Supported wildcards: &#x27;*&#x27; to match 0 or more characters;
# &#x27;?&#x27; to match 1 character. If unset, the input video should be embedded
# in the request as `input_content`. If set, `input_content` must be unset.
}
x__xgafv: string, V1 error format.
Allowed values
1 - v1 error format
2 - v2 error format
Returns:
An object of the form:
{ # This resource represents a long-running operation that is the result of a
# network API call.
&quot;done&quot;: True or False, # If the value is `false`, it means the operation is still in progress.
# If `true`, the operation is completed, and either `error` or `response` is
# available.
&quot;error&quot;: { # The `Status` type defines a logical error model that is suitable for # The error result of the operation in case of failure or cancellation.
# different programming environments, including REST APIs and RPC APIs. It is
# used by [gRPC](https://github.com/grpc). Each `Status` message contains
# three pieces of data: error code, error message, and error details.
#
# You can find out more about this error model and how to work with it in the
# [API Design Guide](https://cloud.google.com/apis/design/errors).
&quot;details&quot;: [ # A list of messages that carry the error details. There is a common set of
# message types for APIs to use.
{
&quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
},
],
&quot;message&quot;: &quot;A String&quot;, # A developer-facing error message, which should be in English. Any
# user-facing error message should be localized and sent in the
# google.rpc.Status.details field, or localized by the client.
&quot;code&quot;: 42, # The status code, which should be an enum value of google.rpc.Code.
},
&quot;response&quot;: { # The normal response of the operation in case of success. If the original
# method returns no data on success, such as `Delete`, the response is
# `google.protobuf.Empty`. If the original method is standard
# `Get`/`Create`/`Update`, the response should be the resource. For other
# methods, the response should have the type `XxxResponse`, where `Xxx`
# is the original method name. For example, if the original method name
# is `TakeSnapshot()`, the inferred response type is
# `TakeSnapshotResponse`.
&quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
},
&quot;metadata&quot;: { # Service-specific metadata associated with the operation. It typically
# contains progress information and common metadata such as create time.
# Some services might not provide such metadata. Any method that returns a
# long-running operation should document the metadata type, if any.
&quot;a_key&quot;: &quot;&quot;, # Properties of the object. Contains field @type with type URL.
},
&quot;name&quot;: &quot;A String&quot;, # The server-assigned name, which is only unique within the same service that
# originally returns it. If you use the default HTTP mapping, the
# `name` should be a resource name ending with `operations/{unique_id}`.
}</pre>
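<p style="margin-top: 1em;">A minimal usage sketch follows. It assumes the google-api-python-client library is installed and Application Default Credentials are configured; the `gs://` URI and region below are placeholders, and the request body mirrors the shape documented above.</p>
<pre>
from googleapiclient.discovery import build

# Build a client for this API surface (name and version as on this page).
service = build("videointelligence", "v1beta2")

# Request body following the schema above; `features` is required, and
# exactly one of `inputUri` / `inputContent` must be set.
body = {
    "inputUri": "gs://my-bucket/my-video.mp4",  # placeholder URI
    "features": ["LABEL_DETECTION", "SHOT_CHANGE_DETECTION"],
    "locationId": "us-east1",
    "videoContext": {
        "labelDetectionConfig": {
            "labelDetectionMode": "SHOT_MODE",  # the default if unspecified
            "model": "builtin/stable",          # the default if unset
        },
    },
}

# Returns immediately with a long-running Operation (see "Returns" above).
operation = service.videos().annotate(body=body).execute()
print(operation["name"])  # server-assigned operation name
</pre>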
</div>
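<p style="margin-top: 1em;">The returned Operation can be inspected using only the fields documented above (`done`, `error`, `response`, `name`); polling for completion goes through the `google.longrunning.Operations` interface. The helper below is a hypothetical sketch, not part of the client library.</p>
<pre>
def summarize_operation(op):
    # `done` is false (or absent) while annotation is still in progress.
    if not op.get("done"):
        return "in progress: " + op.get("name", "")
    # On failure or cancellation, `error` holds a google.rpc.Status.
    if "error" in op:
        err = op["error"]
        return "failed ({}): {}".format(err.get("code"), err.get("message"))
    # On success, `response` carries the AnnotateVideoResponse results.
    return "done: " + op.get("name", "")
</pre>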
</body></html>