Google APIs updated at 2024-06-02 (#142)
Co-authored-by: GitHub Actions <>
github-actions[bot] authored Jun 2, 2024
1 parent efe421c commit db5adfe
Showing 59 changed files with 31,311 additions and 2,199 deletions.
42 changes: 37 additions & 5 deletions gcloud-sdk/genproto/google.ai.generativelanguage.v1.rs
@@ -82,8 +82,12 @@ pub mod part {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Blob {
/// The IANA standard MIME type of the source data.
/// Accepted types include: "image/png", "image/jpeg", "image/heic",
/// "image/heif", "image/webp".
/// Examples:
/// - image/png
/// - image/jpeg
/// If an unsupported MIME type is provided, an error will be returned. For a
/// complete list of supported types, see [Supported file
/// formats](<https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats>).
#[prost(string, tag = "1")]
pub mime_type: ::prost::alloc::string::String,
/// Raw bytes for media formats.
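Following the hunk above, a minimal sketch of populating `Blob` with inline image bytes; the crate path and the `part::Data::InlineData` oneof variant are assumed from the surrounding generated module, not shown in this hunk:

```rust
// Sketch only: attaching inline PNG bytes to a request part.
// Crate path and the Part oneof shape are assumptions, not confirmed here.
use gcloud_sdk::google::ai::generativelanguage::v1::{part, Blob, Part};

fn png_part(bytes: Vec<u8>) -> Part {
    Part {
        data: Some(part::Data::InlineData(Blob {
            mime_type: "image/png".to_string(), // must be a supported MIME type
            data: bytes,                        // raw media bytes
        })),
    }
}
```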
@@ -169,7 +173,7 @@ pub mod safety_rating {
}
/// Safety setting, affecting the safety-blocking behavior.
///
/// Passing a safety setting for a category changes the allowed proability that
/// Passing a safety setting for a category changes the allowed probability that
/// content is blocked.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -418,6 +422,9 @@ pub struct GenerateContentResponse {
pub prompt_feedback: ::core::option::Option<
generate_content_response::PromptFeedback,
>,
/// Output only. Metadata on the generation request's token usage.
#[prost(message, optional, tag = "3")]
pub usage_metadata: ::core::option::Option<generate_content_response::UsageMetadata>,
}
/// Nested message and enum types in `GenerateContentResponse`.
pub mod generate_content_response {
@@ -482,6 +489,20 @@ pub mod generate_content_response {
}
}
}
/// Metadata on the generation request's token usage.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UsageMetadata {
/// Number of tokens in the prompt.
#[prost(int32, tag = "1")]
pub prompt_token_count: i32,
/// Total number of tokens across the generated candidates.
#[prost(int32, tag = "2")]
pub candidates_token_count: i32,
/// Total token count for the generation request (prompt + candidates).
#[prost(int32, tag = "3")]
pub total_token_count: i32,
}
}
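A minimal sketch of reading the new usage fields off a response, assuming the crate path of this generated file:

```rust
// Sketch only: logging the new token-usage fields from a response.
use gcloud_sdk::google::ai::generativelanguage::v1::GenerateContentResponse;

fn log_usage(response: &GenerateContentResponse) {
    if let Some(usage) = &response.usage_metadata {
        println!(
            "prompt={} candidates={} total={}",
            usage.prompt_token_count, usage.candidates_token_count, usage.total_token_count
        );
    }
}
```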
/// A response candidate generated from the model.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -601,7 +622,8 @@ pub struct EmbedContentRequest {
pub title: ::core::option::Option<::prost::alloc::string::String>,
/// Optional. Optional reduced dimension for the output embedding. If set,
/// excessive values in the output embedding are truncated from the end.
/// Supported by `models/text-embedding-latest`.
/// Supported by newer models since 2024, and the earlier model
/// (`models/embedding-001`) cannot specify this value.
#[prost(int32, optional, tag = "5")]
pub output_dimensionality: ::core::option::Option<i32>,
}
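A minimal sketch of the new truncation knob, relying on prost's `Default` derive and an illustrative model name:

```rust
// Sketch only: requesting an embedding truncated to 256 dimensions.
use gcloud_sdk::google::ai::generativelanguage::v1::EmbedContentRequest;

fn embed_request() -> EmbedContentRequest {
    EmbedContentRequest {
        model: "models/text-embedding-004".to_string(), // illustrative model name
        output_dimensionality: Some(256), // truncate the output embedding
        ..Default::default()              // content, task_type, title left unset
    }
}
```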
@@ -662,9 +684,14 @@ pub struct CountTokensRequest {
/// Format: `models/{model}`
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
/// Required. The input given to the model as a prompt.
/// Optional. The input given to the model as a prompt. This field is ignored
/// when `generate_content_request` is set.
#[prost(message, repeated, tag = "2")]
pub contents: ::prost::alloc::vec::Vec<Content>,
/// Optional. The overall input given to the model. CountTokens will count
/// prompt, function calling, etc.
#[prost(message, optional, tag = "3")]
pub generate_content_request: ::core::option::Option<GenerateContentRequest>,
}
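A minimal sketch of the new counting path, with an illustrative model name; `contents` is left empty since it is ignored when `generate_content_request` is set:

```rust
// Sketch only: counting tokens for a full generation request so the
// prompt, tools, and other inputs are all included in the count.
use gcloud_sdk::google::ai::generativelanguage::v1::{
    CountTokensRequest, GenerateContentRequest,
};

fn count_request(gen: GenerateContentRequest) -> CountTokensRequest {
    CountTokensRequest {
        model: "models/gemini-1.5-flash".to_string(), // illustrative model name
        contents: vec![], // ignored when generate_content_request is set
        generate_content_request: Some(gen),
    }
}
```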
/// A response from `CountTokens`.
///
@@ -820,6 +847,11 @@ pub mod generative_service_client {
}
/// Generates a response from the model given an input
/// `GenerateContentRequest`.
///
/// Input capabilities differ between models, including tuned models. See the
/// [model guide](https://ai.google.dev/models/gemini) and
/// [tuning guide](https://ai.google.dev/docs/model_tuning_guidance) for
/// details.
pub async fn generate_content(
&mut self,
request: impl tonic::IntoRequest<super::GenerateContentRequest>,
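For orientation, a hedged sketch of calling this method through the generated tonic client; the endpoint is illustrative, and real calls also need TLS configuration and API credentials:

```rust
// Sketch only: invoking the generated tonic client end to end.
use gcloud_sdk::google::ai::generativelanguage::v1::generative_service_client::GenerativeServiceClient;
use gcloud_sdk::google::ai::generativelanguage::v1::GenerateContentRequest;

async fn generate() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        GenerativeServiceClient::connect("https://generativelanguage.googleapis.com").await?;
    // Populate model and contents in practice; Default is just for the sketch.
    let response = client.generate_content(GenerateContentRequest::default()).await?;
    println!("{:?}", response.into_inner().usage_metadata);
    Ok(())
}
```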
127 changes: 122 additions & 5 deletions gcloud-sdk/genproto/google.ai.generativelanguage.v1beta.rs
@@ -96,8 +96,12 @@ pub mod part {
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Blob {
/// The IANA standard MIME type of the source data.
/// Accepted types include: "image/png", "image/jpeg", "image/heic",
/// "image/heif", "image/webp".
/// Examples:
/// - image/png
/// - image/jpeg
/// If an unsupported MIME type is provided, an error will be returned. For a
/// complete list of supported types, see [Supported file
/// formats](<https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats>).
#[prost(string, tag = "1")]
pub mime_type: ::prost::alloc::string::String,
/// Raw bytes for media formats.
@@ -539,7 +543,7 @@ pub mod safety_rating {
}
/// Safety setting, affecting the safety-blocking behavior.
///
/// Passing a safety setting for a category changes the allowed proability that
/// Passing a safety setting for a category changes the allowed probability that
/// content is blocked.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
@@ -1056,6 +1060,81 @@ pub struct File {
/// Output only. The uri of the `File`.
#[prost(string, tag = "9")]
pub uri: ::prost::alloc::string::String,
/// Output only. Processing state of the File.
#[prost(enumeration = "file::State", tag = "10")]
pub state: i32,
/// Output only. Error status if File processing failed.
#[prost(message, optional, tag = "11")]
pub error: ::core::option::Option<super::super::super::rpc::Status>,
/// Metadata for the File.
#[prost(oneof = "file::Metadata", tags = "12")]
pub metadata: ::core::option::Option<file::Metadata>,
}
/// Nested message and enum types in `File`.
pub mod file {
/// States for the lifecycle of a File.
#[derive(
Clone,
Copy,
Debug,
PartialEq,
Eq,
Hash,
PartialOrd,
Ord,
::prost::Enumeration
)]
#[repr(i32)]
pub enum State {
/// The default value. This value is used if the state is omitted.
Unspecified = 0,
/// File is being processed and cannot be used for inference yet.
Processing = 1,
/// File is processed and available for inference.
Active = 2,
/// File failed processing.
Failed = 10,
}
impl State {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
State::Unspecified => "STATE_UNSPECIFIED",
State::Processing => "PROCESSING",
State::Active => "ACTIVE",
State::Failed => "FAILED",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"STATE_UNSPECIFIED" => Some(Self::Unspecified),
"PROCESSING" => Some(Self::Processing),
"ACTIVE" => Some(Self::Active),
"FAILED" => Some(Self::Failed),
_ => None,
}
}
}
/// Metadata for the File.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Metadata {
/// Output only. Metadata for a video.
#[prost(message, tag = "12")]
VideoMetadata(super::VideoMetadata),
}
}
/// Metadata for a video `File`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VideoMetadata {
/// Duration of the video.
#[prost(message, optional, tag = "1")]
pub video_duration: ::core::option::Option<::prost_types::Duration>,
}
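A minimal sketch of gating on the new `File` state, assuming the `TryFrom<i32>` impl that prost's `Enumeration` derive provides:

```rust
// Sketch only: checking whether an uploaded File is ready for inference.
use gcloud_sdk::google::ai::generativelanguage::v1beta::{file, File};

fn is_ready(f: &File) -> Result<bool, String> {
    match file::State::try_from(f.state) {
        Ok(file::State::Active) => Ok(true),
        Ok(file::State::Failed) => Err(format!("processing failed: {:?}", f.error)),
        _ => Ok(false), // Processing, Unspecified, or an unknown value
    }
}
```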
/// Request for `CreateFile`.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -1745,6 +1824,16 @@ pub struct GenerationConfig {
/// `application/json`: JSON response in the candidates.
#[prost(string, tag = "13")]
pub response_mime_type: ::prost::alloc::string::String,
/// Optional. Output response schema of the generated candidate text when
/// response mime type can have schema. Schema can be objects, primitives or
/// arrays and is a subset of [OpenAPI
/// schema](<https://spec.openapis.org/oas/v3.0.3#schema>).
///
/// If set, a compatible response_mime_type must also be set.
/// Compatible mimetypes:
/// `application/json`: Schema for JSON response.
#[prost(message, optional, tag = "14")]
pub response_schema: ::core::option::Option<Schema>,
}
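A minimal sketch of pairing `response_schema` with a compatible MIME type; the `Schema` and `Type` definitions are assumed from elsewhere in this file:

```rust
// Sketch only: constraining output to JSON with a (mostly empty) object schema.
use gcloud_sdk::google::ai::generativelanguage::v1beta::{GenerationConfig, Schema, Type};

fn json_config() -> GenerationConfig {
    GenerationConfig {
        response_mime_type: "application/json".to_string(), // required alongside a schema
        response_schema: Some(Schema {
            r#type: Type::Object as i32, // top-level JSON object
            ..Default::default()
        }),
        ..Default::default()
    }
}
```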
/// Configuration for retrieving grounding content from a `Corpus` or
/// `Document` created using the Semantic Retriever API.
@@ -1791,6 +1880,9 @@ pub struct GenerateContentResponse {
pub prompt_feedback: ::core::option::Option<
generate_content_response::PromptFeedback,
>,
/// Output only. Metadata on the generation request's token usage.
#[prost(message, optional, tag = "3")]
pub usage_metadata: ::core::option::Option<generate_content_response::UsageMetadata>,
}
/// Nested message and enum types in `GenerateContentResponse`.
pub mod generate_content_response {
@@ -1855,6 +1947,20 @@ pub mod generate_content_response {
}
}
}
/// Metadata on the generation request's token usage.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UsageMetadata {
/// Number of tokens in the prompt.
#[prost(int32, tag = "1")]
pub prompt_token_count: i32,
/// Total number of tokens across the generated candidates.
#[prost(int32, tag = "2")]
pub candidates_token_count: i32,
/// Total token count for the generation request (prompt + candidates).
#[prost(int32, tag = "3")]
pub total_token_count: i32,
}
}
/// A response candidate generated from the model.
#[allow(clippy::derive_partial_eq_without_eq)]
@@ -2257,7 +2363,8 @@ pub struct EmbedContentRequest {
pub title: ::core::option::Option<::prost::alloc::string::String>,
/// Optional. Optional reduced dimension for the output embedding. If set,
/// excessive values in the output embedding are truncated from the end.
/// Supported by `models/text-embedding-latest`.
/// Supported by newer models since 2024, and the earlier model
/// (`models/embedding-001`) cannot specify this value.
#[prost(int32, optional, tag = "5")]
pub output_dimensionality: ::core::option::Option<i32>,
}
@@ -2318,9 +2425,14 @@ pub struct CountTokensRequest {
/// Format: `models/{model}`
#[prost(string, tag = "1")]
pub model: ::prost::alloc::string::String,
/// Required. The input given to the model as a prompt.
/// Optional. The input given to the model as a prompt. This field is ignored
/// when `generate_content_request` is set.
#[prost(message, repeated, tag = "2")]
pub contents: ::prost::alloc::vec::Vec<Content>,
/// Optional. The overall input given to the model. CountTokens will count
/// prompt, function calling, etc.
#[prost(message, optional, tag = "3")]
pub generate_content_request: ::core::option::Option<GenerateContentRequest>,
}
/// A response from `CountTokens`.
///
@@ -2476,6 +2588,11 @@ pub mod generative_service_client {
}
/// Generates a response from the model given an input
/// `GenerateContentRequest`.
///
/// Input capabilities differ between models, including tuned models. See the
/// [model guide](https://ai.google.dev/models/gemini) and
/// [tuning guide](https://ai.google.dev/docs/model_tuning_guidance) for
/// details.
pub async fn generate_content(
&mut self,
request: impl tonic::IntoRequest<super::GenerateContentRequest>,