Struct google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient
pub struct ModelServiceClient<T> { /* private fields */ }
A service for managing Vertex AI’s machine learning Models.
Implementations
impl<T> ModelServiceClient<T>
where
    T: GrpcService<BoxBody>,
    T::Error: Into<StdError>,
    T::ResponseBody: Body<Data = Bytes> + Send + 'static,
    <T::ResponseBody as Body>::Error: Into<StdError> + Send,
pub fn new(inner: T) -> Self
pub fn with_origin(inner: T, origin: Uri) -> Self
pub fn with_interceptor<F>(
    inner: T,
    interceptor: F,
) -> ModelServiceClient<InterceptedService<T, F>>
where
    F: Interceptor,
    T::ResponseBody: Default,
    T: Service<Request<BoxBody>, Response = Response<<T as GrpcService<BoxBody>>::ResponseBody>>,
    <T as Service<Request<BoxBody>>>::Error: Into<StdError> + Send + Sync,
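The constructors above accept any transport that satisfies the `GrpcService` bounds. A minimal construction sketch, assuming tonic's transport and TLS features plus tokio, with a placeholder regional endpoint and a placeholder OAuth access token attached through `with_interceptor`:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use tonic::metadata::MetadataValue;
use tonic::transport::Channel;
use tonic::Request;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical regional endpoint; use the region your resources live in.
    // Depending on the tonic version and enabled TLS features, an explicit
    // `.tls_config(...)` call may be required before connecting.
    let channel = Channel::from_static("https://us-central1-aiplatform.googleapis.com")
        .connect()
        .await?;

    // Attach an OAuth 2.0 access token to every request; obtaining the token
    // (e.g. from application default credentials) is outside this sketch.
    let token: MetadataValue<_> = "Bearer <access-token>".parse()?;
    let mut client = ModelServiceClient::with_interceptor(channel, move |mut req: Request<()>| {
        req.metadata_mut().insert("authorization", token.clone());
        Ok(req)
    });

    // `client` is now ready for the calls sketched in the method docs below.
    Ok(())
}
```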
pub fn send_compressed(self, encoding: CompressionEncoding) -> Self

Compress requests with the given encoding.
This requires the server to support it; otherwise it may respond with an error.
pub fn accept_compressed(self, encoding: CompressionEncoding) -> Self
Enable decompressing responses.
pub fn max_decoding_message_size(self, limit: usize) -> Self
Limits the maximum size of a decoded message.
Default: 4MB
pub fn max_encoding_message_size(self, limit: usize) -> Self
Limits the maximum size of an encoded message.
Default: usize::MAX
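These four settings are builder-style: each consumes and returns the client, so they chain after construction. A sketch assuming tonic's gzip feature is enabled and a client over a plain `Channel`; the 16 MB limit is an arbitrary illustration:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use tonic::codec::CompressionEncoding;
use tonic::transport::Channel;

fn configure(client: ModelServiceClient<Channel>) -> ModelServiceClient<Channel> {
    client
        .send_compressed(CompressionEncoding::Gzip)   // compress outgoing requests
        .accept_compressed(CompressionEncoding::Gzip) // allow gzip-compressed responses
        .max_decoding_message_size(16 * 1024 * 1024)  // raise the 4 MB decode default
        .max_encoding_message_size(16 * 1024 * 1024)
}
```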
pub async fn upload_model(
    &mut self,
    request: impl IntoRequest<UploadModelRequest>,
) -> Result<Response<Operation>, Status>
Uploads a Model artifact into Vertex AI.
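A sketch of starting an upload, assuming a client over a plain `Channel` and the request fields defined in the Vertex AI proto (`parent` plus a `Model` with a display name and a Cloud Storage artifact URI); all resource names and URIs are placeholders, and the returned `Operation` still has to be polled to completion:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::{Model, UploadModelRequest};
use tonic::transport::Channel;

async fn upload(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let request = UploadModelRequest {
        parent: "projects/my-project/locations/us-central1".into(),
        model: Some(Model {
            display_name: "my-model".into(),
            artifact_uri: "gs://my-bucket/model/".into(),
            ..Default::default()
        }),
        ..Default::default()
    };
    // The call only starts a long-running operation; poll it (e.g. through the
    // google.longrunning.Operations service) until it reports `done`.
    let operation = client.upload_model(request).await?.into_inner();
    println!("started operation {}", operation.name);
    Ok(())
}
```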
pub async fn get_model(
    &mut self,
    request: impl IntoRequest<GetModelRequest>,
) -> Result<Response<Model>, Status>
Gets a Model.
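A minimal sketch, assuming `GetModelRequest` identifies the model by its resource `name` (placeholder values shown):

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::GetModelRequest;
use tonic::transport::Channel;

async fn get(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let model = client
        .get_model(GetModelRequest {
            name: "projects/my-project/locations/us-central1/models/1234567890".into(),
            ..Default::default()
        })
        .await?
        .into_inner();
    println!("{} ({})", model.display_name, model.name);
    Ok(())
}
```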
pub async fn list_models(
    &mut self,
    request: impl IntoRequest<ListModelsRequest>,
) -> Result<Response<ListModelsResponse>, Status>
Lists Models in a Location.
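A paging sketch, assuming the standard list fields (`parent`, `page_size`, `page_token`) on the request and `models` / `next_page_token` on the response, as defined in the Vertex AI proto:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::ListModelsRequest;
use tonic::transport::Channel;

async fn list_all(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let mut page_token = String::new();
    loop {
        let response = client
            .list_models(ListModelsRequest {
                parent: "projects/my-project/locations/us-central1".into(),
                page_size: 100,
                page_token: page_token.clone(),
                ..Default::default()
            })
            .await?
            .into_inner();
        for model in &response.models {
            println!("{}", model.name);
        }
        if response.next_page_token.is_empty() {
            break; // no more pages
        }
        page_token = response.next_page_token;
    }
    Ok(())
}
```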
pub async fn list_model_versions(
    &mut self,
    request: impl IntoRequest<ListModelVersionsRequest>,
) -> Result<Response<ListModelVersionsResponse>, Status>
Lists versions of the specified model.
pub async fn update_model(
    &mut self,
    request: impl IntoRequest<UpdateModelRequest>,
) -> Result<Response<Model>, Status>
Updates a Model.
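A sketch of a partial update, assuming `update_mask` maps to `prost_types::FieldMask` (prost's default mapping for the well-known type) and that only the paths named in the mask are overwritten; resource names are placeholders:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::{Model, UpdateModelRequest};
use prost_types::FieldMask;
use tonic::transport::Channel;

async fn rename(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let request = UpdateModelRequest {
        model: Some(Model {
            name: "projects/my-project/locations/us-central1/models/1234567890".into(),
            display_name: "renamed-model".into(),
            ..Default::default()
        }),
        // Only the fields listed in the mask are applied from `model`.
        update_mask: Some(FieldMask {
            paths: vec!["display_name".into()],
        }),
    };
    let updated = client.update_model(request).await?.into_inner();
    println!("updated: {}", updated.display_name);
    Ok(())
}
```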
pub async fn update_explanation_dataset(
    &mut self,
    request: impl IntoRequest<UpdateExplanationDatasetRequest>,
) -> Result<Response<Operation>, Status>
Incrementally updates the dataset used for an examples model.
pub async fn delete_model(
    &mut self,
    request: impl IntoRequest<DeleteModelRequest>,
) -> Result<Response<Operation>, Status>
Deletes a Model.
A model cannot be deleted if any [Endpoint][google.cloud.aiplatform.v1.Endpoint] resource has a [DeployedModel][google.cloud.aiplatform.v1.DeployedModel] based on the model in its [deployed_models][google.cloud.aiplatform.v1.Endpoint.deployed_models] field.
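A sketch of requesting deletion, assuming `DeleteModelRequest` takes the model's resource `name` (placeholder shown); deletion runs as a long-running operation:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::DeleteModelRequest;
use tonic::transport::Channel;

async fn delete(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    // Fails while any Endpoint still has a DeployedModel backed by this Model,
    // so undeploy the model from all Endpoints first.
    let operation = client
        .delete_model(DeleteModelRequest {
            name: "projects/my-project/locations/us-central1/models/1234567890".into(),
            ..Default::default()
        })
        .await?
        .into_inner();
    println!("deletion started: {}", operation.name);
    Ok(())
}
```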
pub async fn delete_model_version(
    &mut self,
    request: impl IntoRequest<DeleteModelVersionRequest>,
) -> Result<Response<Operation>, Status>
Deletes a Model version.
A Model version can only be deleted if there are no [DeployedModels][google.cloud.aiplatform.v1.DeployedModel] created from it. Deleting the only version in the Model is not allowed. Use [DeleteModel][google.cloud.aiplatform.v1.ModelService.DeleteModel] to delete the Model instead.
pub async fn merge_version_aliases(
    &mut self,
    request: impl IntoRequest<MergeVersionAliasesRequest>,
) -> Result<Response<Model>, Status>
Merges a set of aliases for a Model version.
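A sketch of re-tagging a version, assuming the request carries the versioned resource `name` (addressed as `<model>@<version>`) and a `version_aliases` list, where an alias prefixed with `-` is removed rather than added, per the Vertex AI API documentation:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::MergeVersionAliasesRequest;
use tonic::transport::Channel;

async fn retag(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let model = client
        .merge_version_aliases(MergeVersionAliasesRequest {
            // Version 2 of the model.
            name: "projects/my-project/locations/us-central1/models/1234567890@2".into(),
            // Add "production", remove "staging".
            version_aliases: vec!["production".into(), "-staging".into()],
            ..Default::default()
        })
        .await?
        .into_inner();
    println!("aliases now: {:?}", model.version_aliases);
    Ok(())
}
```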
pub async fn export_model(
    &mut self,
    request: impl IntoRequest<ExportModelRequest>,
) -> Result<Response<Operation>, Status>
Exports a trained, exportable Model to a location specified by the user. A Model is considered to be exportable if it has at least one [supported export format][google.cloud.aiplatform.v1.Model.supported_export_formats].
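A sketch of an export to Cloud Storage, assuming the nested `export_model_request::OutputConfig` message with an `export_format_id` and a `GcsDestination` artifact destination, as in the Vertex AI proto; the format id, bucket, and model name are illustrative placeholders:

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::{
    export_model_request::OutputConfig, ExportModelRequest, GcsDestination,
};
use tonic::transport::Channel;

async fn export(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let request = ExportModelRequest {
        name: "projects/my-project/locations/us-central1/models/1234567890".into(),
        output_config: Some(OutputConfig {
            // One of the formats listed in Model.supported_export_formats.
            export_format_id: "tf-saved-model".into(),
            artifact_destination: Some(GcsDestination {
                output_uri_prefix: "gs://my-bucket/exports/".into(),
            }),
            ..Default::default()
        }),
        ..Default::default()
    };
    // The export runs as a long-running operation.
    let operation = client.export_model(request).await?.into_inner();
    println!("export started: {}", operation.name);
    Ok(())
}
```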
pub async fn copy_model(
    &mut self,
    request: impl IntoRequest<CopyModelRequest>,
) -> Result<Response<Operation>, Status>
Copies an already existing Vertex AI Model into the specified Location. The source Model must exist in the same Project. When copying custom Models, users are responsible for ensuring that the [Model.metadata][google.cloud.aiplatform.v1.Model.metadata] content is region-agnostic and that any resources (e.g. files) it depends on remain accessible.
pub async fn import_model_evaluation(
    &mut self,
    request: impl IntoRequest<ImportModelEvaluationRequest>,
) -> Result<Response<ModelEvaluation>, Status>
Imports an externally generated ModelEvaluation.
pub async fn batch_import_model_evaluation_slices(
    &mut self,
    request: impl IntoRequest<BatchImportModelEvaluationSlicesRequest>,
) -> Result<Response<BatchImportModelEvaluationSlicesResponse>, Status>
Imports a list of externally generated ModelEvaluationSlices.
pub async fn batch_import_evaluated_annotations(
    &mut self,
    request: impl IntoRequest<BatchImportEvaluatedAnnotationsRequest>,
) -> Result<Response<BatchImportEvaluatedAnnotationsResponse>, Status>
Imports a list of externally generated EvaluatedAnnotations.
pub async fn get_model_evaluation(
    &mut self,
    request: impl IntoRequest<GetModelEvaluationRequest>,
) -> Result<Response<ModelEvaluation>, Status>
Gets a ModelEvaluation.
pub async fn list_model_evaluations(
    &mut self,
    request: impl IntoRequest<ListModelEvaluationsRequest>,
) -> Result<Response<ListModelEvaluationsResponse>, Status>
Lists ModelEvaluations in a Model.
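A sketch of listing a model's evaluations, assuming `parent` is the Model resource name and the response exposes `model_evaluations` (pagination elided; see the `list_models` sketch for the paging pattern):

```rust
use google_api_proto::google::cloud::aiplatform::v1::model_service_client::ModelServiceClient;
use google_api_proto::google::cloud::aiplatform::v1::ListModelEvaluationsRequest;
use tonic::transport::Channel;

async fn list_evaluations(
    client: &mut ModelServiceClient<Channel>,
) -> Result<(), Box<dyn std::error::Error>> {
    let response = client
        .list_model_evaluations(ListModelEvaluationsRequest {
            // The parent is the Model whose evaluations are listed.
            parent: "projects/my-project/locations/us-central1/models/1234567890".into(),
            ..Default::default()
        })
        .await?
        .into_inner();
    for evaluation in response.model_evaluations {
        println!("{}", evaluation.name);
    }
    Ok(())
}
```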
pub async fn get_model_evaluation_slice(
    &mut self,
    request: impl IntoRequest<GetModelEvaluationSliceRequest>,
) -> Result<Response<ModelEvaluationSlice>, Status>
Gets a ModelEvaluationSlice.
pub async fn list_model_evaluation_slices(
    &mut self,
    request: impl IntoRequest<ListModelEvaluationSlicesRequest>,
) -> Result<Response<ListModelEvaluationSlicesResponse>, Status>
Lists ModelEvaluationSlices in a ModelEvaluation.
Trait Implementations
impl<T: Clone> Clone for ModelServiceClient<T>
fn clone(&self) -> ModelServiceClient<T>
fn clone_from(&mut self, source: &Self)
Auto Trait Implementations
impl<T> !Freeze for ModelServiceClient<T>
impl<T> RefUnwindSafe for ModelServiceClient<T>
where
    T: RefUnwindSafe,
impl<T> Send for ModelServiceClient<T>
where
    T: Send,
impl<T> Sync for ModelServiceClient<T>
where
    T: Sync,
impl<T> Unpin for ModelServiceClient<T>
where
    T: Unpin,
impl<T> UnwindSafe for ModelServiceClient<T>
where
    T: UnwindSafe,
Blanket Implementations
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T

impl<T> Instrument for T
fn instrument(self, span: Span) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>

impl<T> IntoRequest<T> for T
fn into_request(self) -> Request<T>
Wrap the input message T in a tonic::Request.