Struct google_api_proto::google::cloud::videointelligence::v1::VideoAnnotationResults
pub struct VideoAnnotationResults {
pub input_uri: String,
pub segment: Option<VideoSegment>,
pub segment_label_annotations: Vec<LabelAnnotation>,
pub segment_presence_label_annotations: Vec<LabelAnnotation>,
pub shot_label_annotations: Vec<LabelAnnotation>,
pub shot_presence_label_annotations: Vec<LabelAnnotation>,
pub frame_label_annotations: Vec<LabelAnnotation>,
pub face_annotations: Vec<FaceAnnotation>,
pub face_detection_annotations: Vec<FaceDetectionAnnotation>,
pub shot_annotations: Vec<VideoSegment>,
pub explicit_annotation: Option<ExplicitContentAnnotation>,
pub speech_transcriptions: Vec<SpeechTranscription>,
pub text_annotations: Vec<TextAnnotation>,
pub object_annotations: Vec<ObjectTrackingAnnotation>,
pub logo_recognition_annotations: Vec<LogoRecognitionAnnotation>,
pub person_detection_annotations: Vec<PersonDetectionAnnotation>,
pub error: Option<Status>,
}
Annotation results for a single video.
Fields§
input_uri: String
Video file location in Cloud Storage.
segment: Option<VideoSegment>
Video segment on which the annotation is run.
segment_label_annotations: Vec<LabelAnnotation>
Topical label annotations on video level or user-specified segment level. There is exactly one element for each unique label.
segment_presence_label_annotations: Vec<LabelAnnotation>
Presence label annotations on video level or user-specified segment level. There is exactly one element for each unique label. Compared to the existing topical segment_label_annotations, this field presents more fine-grained, segment-level labels detected in video content and is made available only when the client sets LabelDetectionConfig.model to “builtin/latest” in the request.
shot_label_annotations: Vec<LabelAnnotation>
Topical label annotations on shot level. There is exactly one element for each unique label.
shot_presence_label_annotations: Vec<LabelAnnotation>
Presence label annotations on shot level. There is exactly one element for each unique label. Compared to the existing topical shot_label_annotations, this field presents more fine-grained, shot-level labels detected in video content and is made available only when the client sets LabelDetectionConfig.model to “builtin/latest” in the request.
frame_label_annotations: Vec<LabelAnnotation>
Label annotations on frame level. There is exactly one element for each unique label.
face_annotations: Vec<FaceAnnotation>
Deprecated. Please use face_detection_annotations instead.
face_detection_annotations: Vec<FaceDetectionAnnotation>
Face detection annotations.
shot_annotations: Vec<VideoSegment>
Shot annotations. Each shot is represented as a video segment.
explicit_annotation: Option<ExplicitContentAnnotation>
Explicit content annotation.
speech_transcriptions: Vec<SpeechTranscription>
Speech transcription.
text_annotations: Vec<TextAnnotation>
OCR text detection and tracking. Annotations for the list of detected text snippets. Each snippet has a list of frame information associated with it.
object_annotations: Vec<ObjectTrackingAnnotation>
Annotations for the list of objects detected and tracked in the video.
logo_recognition_annotations: Vec<LogoRecognitionAnnotation>
Annotations for the list of logos detected, tracked, and recognized in the video.
person_detection_annotations: Vec<PersonDetectionAnnotation>
Person detection annotations.
error: Option<Status>
If set, indicates an error. Note that for a single AnnotateVideoRequest some videos may succeed and some may fail.
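As a rough illustration of how these fields fit together, here is a minimal sketch that inspects a VideoAnnotationResults value, assumed to come from a completed AnnotateVideo operation with the appropriate Cargo feature of google_api_proto enabled. The summarize helper is hypothetical, and the sketch assumes the error field carries a google.rpc.Status with code and message fields, as elsewhere in the crate.

use google_api_proto::google::cloud::videointelligence::v1::VideoAnnotationResults;

// Hypothetical helper: print a short summary of one video's annotation results.
fn summarize(results: &VideoAnnotationResults) {
    // Per-video failures are reported in `error`; other videos in the same
    // AnnotateVideoRequest may still have succeeded.
    if let Some(status) = &results.error {
        eprintln!("annotation of {} failed: {}", results.input_uri, status.message);
        return;
    }
    println!("results for {}", results.input_uri);
    println!("  segment labels:  {}", results.segment_label_annotations.len());
    println!("  shot labels:     {}", results.shot_label_annotations.len());
    println!("  detected shots:  {}", results.shot_annotations.len());
    println!("  transcriptions:  {}", results.speech_transcriptions.len());
    println!("  text snippets:   {}", results.text_annotations.len());
    println!("  tracked objects: {}", results.object_annotations.len());
}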
Trait Implementations§
impl Clone for VideoAnnotationResults
fn clone(&self) -> VideoAnnotationResults
fn clone_from(&mut self, source: &Self)
impl Debug for VideoAnnotationResults
impl Default for VideoAnnotationResults
impl Message for VideoAnnotationResults
fn encoded_len(&self) -> usize
fn encode(&self, buf: &mut impl BufMut) -> Result<(), EncodeError> where Self: Sized
fn encode_to_vec(&self) -> Vec<u8> where Self: Sized
fn encode_length_delimited(&self, buf: &mut impl BufMut) -> Result<(), EncodeError> where Self: Sized
fn encode_length_delimited_to_vec(&self) -> Vec<u8> where Self: Sized
fn decode(buf: impl Buf) -> Result<Self, DecodeError> where Self: Default
fn decode_length_delimited(buf: impl Buf) -> Result<Self, DecodeError> where Self: Default
fn merge(&mut self, buf: impl Buf) -> Result<(), DecodeError> where Self: Sized
fn merge_length_delimited(&mut self, buf: impl Buf) -> Result<(), DecodeError> where Self: Sized
impl PartialEq for VideoAnnotationResults
fn eq(&self, other: &VideoAnnotationResults) -> bool
impl StructuralPartialEq for VideoAnnotationResults
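Because VideoAnnotationResults implements prost’s Message trait alongside Default and PartialEq, it round-trips through the protobuf wire format like any other generated type. A minimal sketch using only the methods listed above; it assumes prost is available as a direct dependency so the Message trait can be brought into scope, and the bucket URI is made up.

use google_api_proto::google::cloud::videointelligence::v1::VideoAnnotationResults;
use prost::Message;

fn roundtrip() -> Result<(), prost::DecodeError> {
    // Start from the Default impl and fill in one field.
    let mut results = VideoAnnotationResults::default();
    results.input_uri = "gs://example-bucket/example-video.mp4".to_string(); // hypothetical URI

    // encode_to_vec / decode come from the Message impl above; &[u8] implements
    // the Buf trait that decode expects, so the bytes can be parsed back directly.
    let bytes = results.encode_to_vec();
    let decoded = VideoAnnotationResults::decode(bytes.as_slice())?;

    // PartialEq confirms the round trip was lossless.
    assert_eq!(results, decoded);
    Ok(())
}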
Auto Trait Implementations§
impl Freeze for VideoAnnotationResults
impl RefUnwindSafe for VideoAnnotationResults
impl Send for VideoAnnotationResults
impl Sync for VideoAnnotationResults
impl Unpin for VideoAnnotationResults
impl UnwindSafe for VideoAnnotationResults
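The auto traits above (Send and Sync in particular) mean annotation results can be moved to another thread without extra synchronization. A small sketch with a hypothetical worker that just counts detected shots:

use google_api_proto::google::cloud::videointelligence::v1::VideoAnnotationResults;
use std::thread;

// Hypothetical example: hand the results off to a worker thread.
fn count_shots_in_background(results: VideoAnnotationResults) -> usize {
    // VideoAnnotationResults is Send, so it can be moved into the spawned thread.
    let handle = thread::spawn(move || results.shot_annotations.len());
    handle.join().expect("worker thread panicked")
}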
Blanket Implementations§
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoRequest<T> for T
fn into_request(self) -> Request<T>
Wraps T in a tonic::Request.