Skip to content

Instantly share code, notes, and snippets.

@devoncarew
Created May 28, 2025 01:29
Show Gist options
  • Save devoncarew/b940b88cf8136e4e041907dc3c4e83c1 to your computer and use it in GitHub Desktop.
generativelanguage API generated from the discovery docs
// Copyright (c) 2025, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// This is a generated file - do not edit.
import 'dart:typed_data';
import 'package:google_api_base/base.dart';
import 'package:google_api_base/src/encoding.dart';
import 'package:http/http.dart' as http;
/// The https url authority (host) used for all Generative Language API
/// requests; paths are built relative to this host.
const String _authority = 'generativelanguage.googleapis.com';
/// Google API client for the Generative Language API.
///
/// The Gemini API allows developers to build generative AI applications using
/// Gemini models.
///
/// Gemini is our most capable model, built from the ground up to be multimodal.
/// It can generalize and seamlessly understand, operate across, and combine
/// different types of information including language, images, audio, video, and
/// code. You can use the Gemini API for use cases like reasoning across text
/// and images, content generation, dialogue agents, summarization and
/// classification systems, and more.
///
/// For additional documentation, see
/// https://developers.generativeai.google/api.
///
/// ### Getting started
///
/// To get started, create an instance of `GenerativeLanguage`:
///
/// ```dart
/// import 'package:google_api_generativelanguage_v1/generativelanguage.dart';
/// import 'package:googleapis_auth/auth_io.dart' as auth;
///
/// void main() {
/// final httpClient = auth.clientViaApplicationDefaultCredentials(
/// scopes: GenerativeLanguage.scopes,
/// );
/// final generativelanguage = GenerativeLanguage(httpClient);
///
/// // Perform operations on generativelanguage...
///
/// generativelanguage.close();
/// }
/// ```
///
/// ### Available APIs
///
/// The APIs available from a `GenerativeLanguage` instance are:
///
/// - `tunedModels`: [TunedModelsResource]
/// - `tunedModels.operations`: [TunedModelsOperationsResource]
/// - `generatedFiles`: [GeneratedFilesResource]
/// - `generatedFiles.operations`: [GeneratedFilesOperationsResource]
/// - `models`: [ModelsResource]
/// - `models.operations`: [ModelsOperationsResource]
/// - `operations`: [OperationsResource]
class GenerativeLanguage {
  /// Creates a [GenerativeLanguage] API client wrapping [client].
  ///
  /// [additionalHeaders], when provided, are attached to every request made
  /// through this instance.
  GenerativeLanguage(
    http.Client client, {
    Map<String, String>? additionalHeaders,
  }) : client = ApiClient(
          client,
          clientName: 'google_api_generativelanguage_v1/0.1.0',
          additionalHeaders: additionalHeaders,
        ) {
    final apiClient = this.client;
    tunedModels = TunedModelsResource._(apiClient);
    generatedFiles = GeneratedFilesResource._(apiClient);
    models = ModelsResource._(apiClient);
    operations = OperationsResource._(apiClient);
  }

  /// The underlying API client used to issue requests.
  final ApiClient client;

  /// Access the `tunedModels` methods and resources.
  late final TunedModelsResource tunedModels;

  /// Access the `generatedFiles` methods and resources.
  late final GeneratedFilesResource generatedFiles;

  /// Access the `models` methods and resources.
  late final ModelsResource models;

  /// Access the `operations` methods and resources.
  late final OperationsResource operations;

  /// Closes the underlying http client.
  ///
  /// After calling [close], no further methods should be invoked; calling
  /// [close] while asynchronous requests are in flight leaves their behavior
  /// undefined.
  void close() => client.close();
}
/// The generated class for the `tunedModels` resource.
class TunedModelsResource {
  TunedModelsResource._(this._client)
      : operations = TunedModelsOperationsResource._(_client);

  final ApiClient _client;

  /// Access the `operations` methods and resources.
  final TunedModelsOperationsResource operations;

  /// Generates a [streamed
  /// response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream)
  /// from the model given an input `GenerateContentRequest`.
  ///
  /// [model] Required. The name of the `Model` to use for generating the
  /// completion. Format: `models/{model}`.
  Stream<GenerateContentResponse> streamGenerateContent(
    GenerateContentRequest request, {
    required String model,
  }) {
    final uri = Uri.https(_authority, 'v1/$model:streamGenerateContent');
    return _client.postStreaming(uri, GenerateContentResponse.fromJson, request);
  }

  /// Generates a model response given an input `GenerateContentRequest`.
  ///
  /// See the [text generation
  /// guide](https://ai.google.dev/gemini-api/docs/text-generation) for usage
  /// details. Input capabilities differ between models (including tuned
  /// models); see the [model
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning
  /// guide](https://ai.google.dev/gemini-api/docs/model-tuning).
  ///
  /// [model] Required. The name of the `Model` to use for generating the
  /// completion. Format: `models/{model}`.
  Future<GenerateContentResponse> generateContent(
    GenerateContentRequest request, {
    required String model,
  }) async {
    final uri = Uri.https(_authority, 'v1/$model:generateContent');
    return _client.post(uri, GenerateContentResponse.fromJson, request);
  }
}
/// The generated class for the `tunedModels.operations` resource.
class TunedModelsOperationsResource {
  TunedModelsOperationsResource._(this._client);

  final ApiClient _client;

  /// Starts asynchronous cancellation on a long-running operation.
  ///
  /// The server makes a best effort to cancel the operation; success is not
  /// guaranteed. If unsupported by the server, this returns
  /// `google.rpc.Code.UNIMPLEMENTED`. Clients may use Operations.GetOperation
  /// (or similar) to check whether cancellation succeeded or the operation
  /// completed anyway. A successfully cancelled operation is not deleted;
  /// instead it carries an Operation.error with a google.rpc.Status.code of
  /// `1`, corresponding to `Code.CANCELLED`.
  ///
  /// [name] The name of the operation resource to be cancelled.
  Future<Empty> cancel(
    CancelOperationRequest request, {
    required String name,
  }) async {
    final uri = Uri.https(_authority, 'v1/$name:cancel');
    return _client.post(uri, Empty.fromJson, request);
  }

  /// Gets the latest state of a long-running operation.
  ///
  /// Clients can poll this method at intervals recommended by the API
  /// service.
  ///
  /// [name] The name of the operation resource.
  Future<Operation> get({required String name}) async {
    final uri = Uri.https(_authority, 'v1/$name');
    return _client.get(uri, Operation.fromJson);
  }

  /// Lists operations that match the specified filter in the request.
  ///
  /// If the server doesn't support this method, it returns `UNIMPLEMENTED`.
  ///
  /// [name] The name of the operation's parent resource.
  ///
  /// [pageToken] The standard list page token.
  ///
  /// [filter] The standard list filter.
  ///
  /// [pageSize] The standard list page size.
  Future<ListOperationsResponse> list({
    required String name,
    String? pageToken,
    String? filter,
    int? pageSize,
  }) async {
    // Only include query parameters the caller actually supplied.
    final query = <String, String>{};
    if (pageToken != null) query['pageToken'] = pageToken;
    if (filter != null) query['filter'] = filter;
    if (pageSize != null) query['pageSize'] = '$pageSize';
    final uri = Uri.https(_authority, 'v1/$name/operations', query);
    return _client.get(uri, ListOperationsResponse.fromJson);
  }
}
/// The generated class for the `generatedFiles` resource.
class GeneratedFilesResource {
  GeneratedFilesResource._(this._client)
      : operations = GeneratedFilesOperationsResource._(_client);

  // Retained for parity with sibling resources and future methods.
  final ApiClient _client;

  /// Access the `operations` methods and resources.
  final GeneratedFilesOperationsResource operations;
}
/// The generated class for the `generatedFiles.operations` resource.
class GeneratedFilesOperationsResource {
  GeneratedFilesOperationsResource._(this._client);

  final ApiClient _client;

  /// Gets the latest state of a long-running operation.
  ///
  /// Clients can poll this method at intervals recommended by the API
  /// service.
  ///
  /// [name] The name of the operation resource.
  Future<Operation> get({required String name}) async {
    final uri = Uri.https(_authority, 'v1/$name');
    return _client.get(uri, Operation.fromJson);
  }
}
/// The generated class for the `models` resource.
class ModelsResource {
  ModelsResource._(this._client)
      : operations = ModelsOperationsResource._(_client);

  final ApiClient _client;

  /// Access the `operations` methods and resources.
  final ModelsOperationsResource operations;

  /// Generates multiple embedding vectors from the input `Content`, which
  /// consists of a batch of strings represented as `EmbedContentRequest`
  /// objects.
  ///
  /// [model] Required. The model's resource name, serving as an ID for the
  /// Model to use. Must match a model name returned by the `ListModels`
  /// method. Format: `models/{model}`
  Future<BatchEmbedContentsResponse> batchEmbedContents(
    BatchEmbedContentsRequest request, {
    required String model,
  }) async {
    final uri = Uri.https(_authority, 'v1/$model:batchEmbedContents');
    return _client.post(uri, BatchEmbedContentsResponse.fromJson, request);
  }

  /// Runs a model's tokenizer on input `Content` and returns the token count.
  ///
  /// See the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens) to
  /// learn more about tokens.
  ///
  /// [model] Required. The model's resource name, serving as an ID for the
  /// Model to use. Must match a model name returned by the `ListModels`
  /// method. Format: `models/{model}`
  Future<CountTokensResponse> countTokens(
    CountTokensRequest request, {
    required String model,
  }) async {
    final uri = Uri.https(_authority, 'v1/$model:countTokens');
    return _client.post(uri, CountTokensResponse.fromJson, request);
  }

  /// Generates a [streamed
  /// response](https://ai.google.dev/gemini-api/docs/text-generation?lang=python#generate-a-text-stream)
  /// from the model given an input `GenerateContentRequest`.
  ///
  /// [model] Required. The name of the `Model` to use for generating the
  /// completion. Format: `models/{model}`.
  Stream<GenerateContentResponse> streamGenerateContent(
    GenerateContentRequest request, {
    required String model,
  }) {
    final uri = Uri.https(_authority, 'v1/$model:streamGenerateContent');
    return _client.postStreaming(uri, GenerateContentResponse.fromJson, request);
  }

  /// Gets information about a specific `Model`, such as its version number,
  /// token limits,
  /// [parameters](https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters)
  /// and other metadata.
  ///
  /// See the [Gemini models
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) for detailed
  /// model information.
  ///
  /// [name] Required. The resource name of the model. Must match a model name
  /// returned by the `ListModels` method. Format: `models/{model}`
  Future<Model> get({required String name}) async {
    final uri = Uri.https(_authority, 'v1/$name');
    return _client.get(uri, Model.fromJson);
  }

  /// Generates a model response given an input `GenerateContentRequest`.
  ///
  /// See the [text generation
  /// guide](https://ai.google.dev/gemini-api/docs/text-generation) for usage
  /// details. Input capabilities differ between models (including tuned
  /// models); see the [model
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning
  /// guide](https://ai.google.dev/gemini-api/docs/model-tuning).
  ///
  /// [model] Required. The name of the `Model` to use for generating the
  /// completion. Format: `models/{model}`.
  Future<GenerateContentResponse> generateContent(
    GenerateContentRequest request, {
    required String model,
  }) async {
    final uri = Uri.https(_authority, 'v1/$model:generateContent');
    return _client.post(uri, GenerateContentResponse.fromJson, request);
  }

  /// Lists the [`Model`s](https://ai.google.dev/gemini-api/docs/models/gemini)
  /// available through the Gemini API.
  ///
  /// [pageToken] A page token received from a previous `ListModels` call.
  /// Pass the `page_token` returned by one request to the next request to
  /// retrieve the next page. When paginating, all other parameters must match
  /// the call that produced the token.
  ///
  /// [pageSize] The maximum number of `Models` returned per page; defaults to
  /// 50 if unspecified, capped at 1000 even if a larger page_size is passed.
  Future<ListModelsResponse> list({String? pageToken, int? pageSize}) async {
    // Only include query parameters the caller actually supplied.
    final query = <String, String>{};
    if (pageToken != null) query['pageToken'] = pageToken;
    if (pageSize != null) query['pageSize'] = '$pageSize';
    final uri = Uri.https(_authority, 'v1/models', query);
    return _client.get(uri, ListModelsResponse.fromJson);
  }

  /// Generates a text embedding vector from the input `Content` using the
  /// specified [Gemini Embedding
  /// model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding).
  ///
  /// [model] Required. The model's resource name, serving as an ID for the
  /// Model to use. Must match a model name returned by the `ListModels`
  /// method. Format: `models/{model}`
  Future<EmbedContentResponse> embedContent(
    EmbedContentRequest request, {
    required String model,
  }) async {
    final uri = Uri.https(_authority, 'v1/$model:embedContent');
    return _client.post(uri, EmbedContentResponse.fromJson, request);
  }
}
/// The generated class for the `models.operations` resource.
class ModelsOperationsResource {
  ModelsOperationsResource._(this._client);

  final ApiClient _client;

  /// Gets the latest state of a long-running operation.
  ///
  /// Clients can poll this method at intervals recommended by the API
  /// service.
  ///
  /// [name] The name of the operation resource.
  Future<Operation> get({required String name}) async {
    final uri = Uri.https(_authority, 'v1/$name');
    return _client.get(uri, Operation.fromJson);
  }

  /// Lists operations that match the specified filter in the request.
  ///
  /// If the server doesn't support this method, it returns `UNIMPLEMENTED`.
  ///
  /// [name] The name of the operation's parent resource.
  ///
  /// [pageSize] The standard list page size.
  ///
  /// [filter] The standard list filter.
  ///
  /// [pageToken] The standard list page token.
  Future<ListOperationsResponse> list({
    required String name,
    int? pageSize,
    String? filter,
    String? pageToken,
  }) async {
    // Only include query parameters the caller actually supplied.
    final query = <String, String>{};
    if (pageSize != null) query['pageSize'] = '$pageSize';
    if (filter != null) query['filter'] = filter;
    if (pageToken != null) query['pageToken'] = pageToken;
    final uri = Uri.https(_authority, 'v1/$name/operations', query);
    return _client.get(uri, ListOperationsResponse.fromJson);
  }
}
/// The generated class for the `operations` resource.
class OperationsResource {
  OperationsResource._(this._client);

  final ApiClient _client;

  /// Lists operations that match the specified filter in the request.
  ///
  /// If the server doesn't support this method, it returns `UNIMPLEMENTED`.
  ///
  /// [name] The name of the operation's parent resource.
  ///
  /// [pageSize] The standard list page size.
  ///
  /// [pageToken] The standard list page token.
  ///
  /// [filter] The standard list filter.
  Future<ListOperationsResponse> list({
    required String name,
    int? pageSize,
    String? pageToken,
    String? filter,
  }) async {
    // Only include query parameters the caller actually supplied.
    final query = <String, String>{};
    if (pageSize != null) query['pageSize'] = '$pageSize';
    if (pageToken != null) query['pageToken'] = pageToken;
    if (filter != null) query['filter'] = filter;
    final uri = Uri.https(_authority, 'v1/$name', query);
    return _client.get(uri, ListOperationsResponse.fromJson);
  }

  /// Deletes a long-running operation.
  ///
  /// Indicates the client is no longer interested in the operation result; it
  /// does not cancel the operation. If the server doesn't support this
  /// method, it returns `google.rpc.Code.UNIMPLEMENTED`.
  ///
  /// [name] The name of the operation resource to be deleted.
  Future<Empty> delete({required String name}) async {
    final uri = Uri.https(_authority, 'v1/$name');
    return _client.delete(uri, Empty.fromJson);
  }
}
/// The response message for Operations.ListOperations.
class ListOperationsResponse extends ProtoMessage {
  ListOperationsResponse({List<Operation>? operations, this.nextPageToken})
      : operations = operations ?? [];

  /// Deserializes a [ListOperationsResponse] from its JSON representation.
  factory ListOperationsResponse.fromJson(Map<String, Object?> json) {
    return ListOperationsResponse(
      operations: _cvt.$1.decode(json['operations']),
      nextPageToken: _cvt.$2.decode(json['nextPageToken']),
    );
  }

  // Field codecs: $1 = repeated Operation, $2 = string.
  static final _cvt = (
    ListType(SchemaType(Operation.fromJson)),
    FieldType.stringType,
  );

  /// A list of operations that matches the specified filter in the request.
  final List<Operation> operations;

  /// The standard List next-page token.
  final String? nextPageToken;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    json['operations'] = _cvt.$1.encode(operations);
    if (nextPageToken != null) {
      json['nextPageToken'] = _cvt.$2.encode(nextPageToken);
    }
    return json;
  }
}
/// Metadata about the state and progress of creating a tuned model returned
/// from the long-running operation
class CreateTunedModelMetadata extends ProtoMessage {
  CreateTunedModelMetadata({
    List<TuningSnapshot>? snapshots,
    this.tunedModel,
    this.completedSteps,
    this.completedPercent,
    this.totalSteps,
  }) : snapshots = snapshots ?? [];

  /// Deserializes a [CreateTunedModelMetadata] from its JSON representation.
  factory CreateTunedModelMetadata.fromJson(Map<String, Object?> json) {
    return CreateTunedModelMetadata(
      snapshots: _cvt.$1.decode(json['snapshots']),
      tunedModel: _cvt.$2.decode(json['tunedModel']),
      completedSteps: _cvt.$3.decode(json['completedSteps']),
      completedPercent: _cvt.$4.decode(json['completedPercent']),
      // totalSteps shares the int32 codec used by completedSteps.
      totalSteps: _cvt.$3.decode(json['totalSteps']),
    );
  }

  // Field codecs: $1 = repeated TuningSnapshot, $2 = string, $3 = int32,
  // $4 = double.
  static final _cvt = (
    ListType(SchemaType(TuningSnapshot.fromJson)),
    FieldType.stringType,
    FieldType.int32Type,
    FieldType.doubleType,
  );

  /// Metrics collected during tuning.
  final List<TuningSnapshot> snapshots;

  /// Name of the tuned model associated with the tuning operation.
  final String? tunedModel;

  /// The number of steps completed.
  final int? completedSteps;

  /// The completed percentage for the tuning operation.
  final double? completedPercent;

  /// The total number of tuning steps.
  final int? totalSteps;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    json['snapshots'] = _cvt.$1.encode(snapshots);
    if (tunedModel != null) json['tunedModel'] = _cvt.$2.encode(tunedModel);
    if (completedSteps != null) {
      json['completedSteps'] = _cvt.$3.encode(completedSteps);
    }
    if (completedPercent != null) {
      json['completedPercent'] = _cvt.$4.encode(completedPercent);
    }
    if (totalSteps != null) json['totalSteps'] = _cvt.$3.encode(totalSteps);
    return json;
  }
}
/// Segment of the content.
class Segment extends ProtoMessage {
  Segment({this.text, this.startIndex, this.endIndex, this.partIndex});

  /// Deserializes a [Segment] from its JSON representation.
  factory Segment.fromJson(Map<String, Object?> json) {
    return Segment(
      text: _cvt.$1.decode(json['text']),
      startIndex: _cvt.$2.decode(json['startIndex']),
      endIndex: _cvt.$2.decode(json['endIndex']),
      partIndex: _cvt.$2.decode(json['partIndex']),
    );
  }

  // Field codecs: $1 = string, $2 = int32 (shared by all index fields).
  static final _cvt = (FieldType.stringType, FieldType.int32Type);

  /// Output only. The text corresponding to the segment from the response.
  @readonly
  final String? text;

  /// Output only. Start index in the given Part, measured in bytes.
  ///
  /// Offset from the start of the Part, inclusive, starting at zero.
  @readonly
  final int? startIndex;

  /// Output only. End index in the given Part, measured in bytes.
  ///
  /// Offset from the start of the Part, exclusive, starting at zero.
  @readonly
  final int? endIndex;

  /// Output only. The index of a Part object within its parent Content object.
  @readonly
  final int? partIndex;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (text != null) json['text'] = _cvt.$1.encode(text);
    if (startIndex != null) json['startIndex'] = _cvt.$2.encode(startIndex);
    if (endIndex != null) json['endIndex'] = _cvt.$2.encode(endIndex);
    if (partIndex != null) json['partIndex'] = _cvt.$2.encode(partIndex);
    return json;
  }
}
/// This resource represents a long-running operation that is the result of a
/// network API call.
class Operation extends ProtoMessage {
  Operation({this.done, this.error, this.response, this.name, this.metadata});

  /// Deserializes an [Operation] from its JSON representation.
  factory Operation.fromJson(Map<String, Object?> json) {
    return Operation(
      done: _cvt.$1.decode(json['done']),
      error: _cvt.$2.decode(json['error']),
      response: _cvt.$3.decode(json['response']),
      name: _cvt.$4.decode(json['name']),
      // metadata shares the Any codec used by response.
      metadata: _cvt.$3.decode(json['metadata']),
    );
  }

  // Field codecs: $1 = bool, $2 = Status, $3 = Any, $4 = string.
  static final _cvt = (
    FieldType.boolType,
    SchemaType(Status.fromJson),
    FieldType.anyType,
    FieldType.stringType,
  );

  /// If the value is `false`, it means the operation is still in progress.
  ///
  /// If `true`, the operation is completed, and either `error` or `response`
  /// is available.
  final bool? done;

  /// The error result of the operation in case of failure or cancellation.
  final Status? error;

  /// The normal, successful response of the operation.
  ///
  /// If the original method returns no data on success, such as `Delete`, the
  /// response is `google.protobuf.Empty`. If the original method is standard
  /// `Get`/`Create`/`Update`, the response should be the resource. For other
  /// methods, the response should have the type `XxxResponse`, where `Xxx` is
  /// the original method name. For example, if the original method name is
  /// `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  final Any? response;

  /// The server-assigned name, which is only unique within the same service
  /// that originally returns it.
  ///
  /// If you use the default HTTP mapping, the `name` should be a resource
  /// name ending with `operations/{unique_id}`.
  final String? name;

  /// Service-specific metadata associated with the operation.
  ///
  /// It typically contains progress information and common metadata such as
  /// create time. Some services might not provide such metadata. Any method
  /// that returns a long-running operation should document the metadata type,
  /// if any.
  final Any? metadata;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (done != null) json['done'] = _cvt.$1.encode(done);
    if (error != null) json['error'] = _cvt.$2.encode(error);
    if (response != null) json['response'] = _cvt.$3.encode(response);
    if (name != null) json['name'] = _cvt.$4.encode(name);
    if (metadata != null) json['metadata'] = _cvt.$3.encode(metadata);
    return json;
  }
}
/// Request to generate a completion from the model.
class GenerateContentRequest extends ProtoMessage {
  GenerateContentRequest({
    List<Content>? contents,
    this.model,
    List<SafetySetting>? safetySettings,
    this.generationConfig,
  })  : contents = contents ?? [],
        safetySettings = safetySettings ?? [];

  /// Deserializes a [GenerateContentRequest] from its JSON representation.
  factory GenerateContentRequest.fromJson(Map<String, Object?> json) {
    return GenerateContentRequest(
      contents: _cvt.$1.decode(json['contents']),
      model: _cvt.$2.decode(json['model']),
      safetySettings: _cvt.$3.decode(json['safetySettings']),
      generationConfig: _cvt.$4.decode(json['generationConfig']),
    );
  }

  // Field codecs: $1 = repeated Content, $2 = string,
  // $3 = repeated SafetySetting, $4 = GenerationConfig.
  static final _cvt = (
    ListType(SchemaType(Content.fromJson)),
    FieldType.stringType,
    ListType(SchemaType(SafetySetting.fromJson)),
    SchemaType(GenerationConfig.fromJson),
  );

  /// Required. The content of the current conversation with the model.
  ///
  /// For single-turn queries, this is a single instance. For multi-turn
  /// queries like
  /// [chat](https://ai.google.dev/gemini-api/docs/text-generation#chat), this
  /// is a repeated field containing the conversation history plus the latest
  /// request.
  final List<Content> contents;

  /// Required. The name of the `Model` to use for generating the completion.
  ///
  /// Format: `models/{model}`.
  final String? model;

  /// Optional. A list of unique `SafetySetting` instances for blocking unsafe
  /// content.
  ///
  /// Enforced on the `GenerateContentRequest.contents` and
  /// `GenerateContentResponse.candidates`. At most one setting per
  /// `SafetyCategory` type is allowed. The API blocks any contents and
  /// responses that fail the thresholds set here; for any `SafetyCategory`
  /// without an entry, the API's default safety setting applies. Supported
  /// harm categories: HARM_CATEGORY_HATE_SPEECH,
  /// HARM_CATEGORY_SEXUALLY_EXPLICIT, HARM_CATEGORY_DANGEROUS_CONTENT,
  /// HARM_CATEGORY_HARASSMENT, HARM_CATEGORY_CIVIC_INTEGRITY. See the
  /// [guide](https://ai.google.dev/gemini-api/docs/safety-settings) for
  /// available safety settings and the
  /// [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance)
  /// for incorporating safety considerations into your AI applications.
  final List<SafetySetting> safetySettings;

  /// Optional. Configuration options for model generation and outputs.
  final GenerationConfig? generationConfig;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    json['contents'] = _cvt.$1.encode(contents);
    if (model != null) json['model'] = _cvt.$2.encode(model);
    json['safetySettings'] = _cvt.$3.encode(safetySettings);
    if (generationConfig != null) {
      json['generationConfig'] = _cvt.$4.encode(generationConfig);
    }
    return json;
  }
}
/// Chunk from the web.
class Web extends ProtoMessage {
  Web({this.uri, this.title});

  /// Deserializes a [Web] chunk from its JSON representation.
  factory Web.fromJson(Map<String, Object?> json) {
    return Web(
      uri: _cvt.$1.decode(json['uri']),
      title: _cvt.$1.decode(json['title']),
    );
  }

  // Field codecs: a single string codec shared by both fields.
  static final _cvt = (FieldType.stringType,);

  /// URI reference of the chunk.
  final String? uri;

  /// Title of the chunk.
  final String? title;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (uri != null) json['uri'] = _cvt.$1.encode(uri);
    if (title != null) json['title'] = _cvt.$1.encode(title);
    return json;
  }
}
/// A citation to a source for a portion of a specific response.
class CitationSource extends ProtoMessage {
  CitationSource({this.startIndex, this.license, this.endIndex, this.uri});

  /// Deserializes a [CitationSource] from its JSON representation.
  factory CitationSource.fromJson(Map<String, Object?> json) {
    return CitationSource(
      startIndex: _cvt.$1.decode(json['startIndex']),
      license: _cvt.$2.decode(json['license']),
      endIndex: _cvt.$1.decode(json['endIndex']),
      uri: _cvt.$2.decode(json['uri']),
    );
  }

  // Field codecs: $1 = int32 (both index fields), $2 = string.
  static final _cvt = (FieldType.int32Type, FieldType.stringType);

  /// Optional. Start of segment of the response that is attributed to this
  /// source.
  ///
  /// Index indicates the start of the segment, measured in bytes.
  final int? startIndex;

  /// Optional. License for the GitHub project that is attributed as a source
  /// for segment.
  ///
  /// License info is required for code citations.
  final String? license;

  /// Optional. End of the attributed segment, exclusive.
  final int? endIndex;

  /// Optional. URI that is attributed as a source for a portion of the text.
  final String? uri;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (startIndex != null) json['startIndex'] = _cvt.$1.encode(startIndex);
    if (license != null) json['license'] = _cvt.$2.encode(license);
    if (endIndex != null) json['endIndex'] = _cvt.$1.encode(endIndex);
    if (uri != null) json['uri'] = _cvt.$2.encode(uri);
    return json;
  }
}
/// Candidates with top log probabilities at each decoding step.
class TopCandidates extends ProtoMessage {
  TopCandidates({List<LogprobsResultCandidate>? candidates})
      : candidates = candidates ?? [];

  /// Deserializes a [TopCandidates] from its JSON representation.
  factory TopCandidates.fromJson(Map<String, Object?> json) {
    return TopCandidates(candidates: _cvt.$1.decode(json['candidates']));
  }

  // Field codec for the repeated LogprobsResultCandidate field.
  static final _cvt = (ListType(SchemaType(LogprobsResultCandidate.fromJson)),);

  /// Sorted by log probability in descending order.
  final List<LogprobsResultCandidate> candidates;

  @override
  Map<String, Object?> toJson() {
    return {'candidates': _cvt.$1.encode(candidates)};
  }
}
/// Record for a single tuning step.
class TuningSnapshot extends ProtoMessage {
  TuningSnapshot({this.meanLoss, this.step, this.epoch, this.computeTime});

  /// Deserializes a [TuningSnapshot] from its JSON representation.
  factory TuningSnapshot.fromJson(Map<String, Object?> json) {
    return TuningSnapshot(
      meanLoss: _cvt.$1.decode(json['meanLoss']),
      step: _cvt.$2.decode(json['step']),
      epoch: _cvt.$2.decode(json['epoch']),
      computeTime: _cvt.$3.decode(json['computeTime']),
    );
  }

  // Field codecs: $1 = double, $2 = int32 (step and epoch), $3 = datetime.
  static final _cvt = (
    FieldType.doubleType,
    FieldType.int32Type,
    FieldType.dateTimeType,
  );

  /// Output only. The mean loss of the training examples for this step.
  @readonly
  final double? meanLoss;

  /// Output only. The tuning step.
  @readonly
  final int? step;

  /// Output only. The epoch this step was part of.
  @readonly
  final int? epoch;

  /// Output only. The timestamp when this metric was computed.
  @readonly
  final GoogleDateTime? computeTime;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (meanLoss != null) json['meanLoss'] = _cvt.$1.encode(meanLoss);
    if (step != null) json['step'] = _cvt.$2.encode(step);
    if (epoch != null) json['epoch'] = _cvt.$2.encode(epoch);
    if (computeTime != null) json['computeTime'] = _cvt.$3.encode(computeTime);
    return json;
  }
}
/// Candidate for the logprobs token and score.
class LogprobsResultCandidate extends ProtoMessage {
  LogprobsResultCandidate({this.token, this.logProbability, this.tokenId});

  /// Deserializes a [LogprobsResultCandidate] from its JSON representation.
  factory LogprobsResultCandidate.fromJson(Map<String, Object?> json) {
    return LogprobsResultCandidate(
      token: _cvt.$1.decode(json['token']),
      logProbability: _cvt.$2.decode(json['logProbability']),
      tokenId: _cvt.$3.decode(json['tokenId']),
    );
  }

  // Field codecs: $1 = string, $2 = double, $3 = int32.
  static final _cvt = (
    FieldType.stringType,
    FieldType.doubleType,
    FieldType.int32Type,
  );

  /// The candidate’s token string value.
  final String? token;

  /// The candidate's log probability.
  final double? logProbability;

  /// The candidate’s token id value.
  final int? tokenId;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (token != null) json['token'] = _cvt.$1.encode(token);
    if (logProbability != null) {
      json['logProbability'] = _cvt.$2.encode(logProbability);
    }
    if (tokenId != null) json['tokenId'] = _cvt.$3.encode(tokenId);
    return json;
  }
}
/// Response from the model supporting multiple candidate responses.
///
/// Safety ratings and content filtering are reported for both prompt in
/// `GenerateContentResponse.prompt_feedback` and for each candidate in
/// `finish_reason` and in `safety_ratings`. The API: - Returns either all
/// requested candidates or none of them - Returns no candidates at all only if
/// there was something wrong with the prompt (check `prompt_feedback`) -
/// Reports feedback on each candidate in `finish_reason` and `safety_ratings`.
class GenerateContentResponse extends ProtoMessage {
  GenerateContentResponse({
    List<Candidate>? candidates,
    this.promptFeedback,
    this.usageMetadata,
    this.modelVersion,
  }) : candidates = candidates ?? [];

  /// Deserializes a [GenerateContentResponse] from its JSON representation.
  factory GenerateContentResponse.fromJson(Map<String, Object?> json) {
    return GenerateContentResponse(
      candidates: _cvt.$1.decode(json['candidates']),
      promptFeedback: _cvt.$2.decode(json['promptFeedback']),
      usageMetadata: _cvt.$3.decode(json['usageMetadata']),
      modelVersion: _cvt.$4.decode(json['modelVersion']),
    );
  }

  // Field codecs: $1 = repeated Candidate, $2 = PromptFeedback,
  // $3 = UsageMetadata, $4 = string.
  static final _cvt = (
    ListType(SchemaType(Candidate.fromJson)),
    SchemaType(PromptFeedback.fromJson),
    SchemaType(UsageMetadata.fromJson),
    FieldType.stringType,
  );

  /// Candidate responses from the model.
  final List<Candidate> candidates;

  /// Returns the prompt's feedback related to the content filters.
  final PromptFeedback? promptFeedback;

  /// Output only. Metadata on the generation requests' token usage.
  @readonly
  final UsageMetadata? usageMetadata;

  /// Output only. The model version used to generate the response.
  @readonly
  final String? modelVersion;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    json['candidates'] = _cvt.$1.encode(candidates);
    if (promptFeedback != null) {
      json['promptFeedback'] = _cvt.$2.encode(promptFeedback);
    }
    if (usageMetadata != null) {
      json['usageMetadata'] = _cvt.$3.encode(usageMetadata);
    }
    if (modelVersion != null) {
      json['modelVersion'] = _cvt.$4.encode(modelVersion);
    }
    return json;
  }
}
/// The response to an `EmbedContentRequest`.
class EmbedContentResponse extends ProtoMessage {
  EmbedContentResponse({this.embedding});

  /// Deserializes an [EmbedContentResponse] from its JSON representation.
  factory EmbedContentResponse.fromJson(Map<String, Object?> json) {
    return EmbedContentResponse(embedding: _cvt.$1.decode(json['embedding']));
  }

  // Field codec for the single ContentEmbedding field.
  static final _cvt = (SchemaType(ContentEmbedding.fromJson),);

  /// Output only. The embedding generated from the input content.
  @readonly
  final ContentEmbedding? embedding;

  @override
  Map<String, Object?> toJson() {
    final json = <String, Object?>{};
    if (embedding != null) json['embedding'] = _cvt.$1.encode(embedding);
    return json;
  }
}
/// A set of the feedback metadata the prompt specified in
/// `GenerateContentRequest.content`.
class PromptFeedback extends ProtoMessage {
PromptFeedback({this.blockReason, List<SafetyRating>? safetyRatings})
: safetyRatings = safetyRatings ?? [];
/// Deserializes a [PromptFeedback] from its JSON wire form.
factory PromptFeedback.fromJson(Map<String, Object?> json) => PromptFeedback(
blockReason: _cvt.$1.decode(json['blockReason']),
safetyRatings: _cvt.$2.decode(json['safetyRatings']),
);
// Field converter record; positions ($1, $2) are shared by fromJson/toJson.
static final _cvt = (
FieldType.stringType,
ListType(SchemaType(SafetyRating.fromJson)),
);
/// Optional. If set, the prompt was blocked and no candidates are returned.
///
/// Rephrase the prompt.
///
/// See [PromptFeedbackBlockReasonEnum] for valid enum values.
final String? blockReason;
/// Ratings for safety of the prompt.
///
/// There is at most one rating per category.
final List<SafetyRating> safetyRatings;
@override
Map<String, Object?> toJson() => {
if (blockReason != null) 'blockReason': _cvt.$1.encode(blockReason),
'safetyRatings': _cvt.$2.encode(safetyRatings),
};
}
/// The enum values for [PromptFeedback.blockReason].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
class PromptFeedbackBlockReasonEnum {
// Private constructor: this class is only a namespace for the string
// constants below and is never instantiated.
PromptFeedbackBlockReasonEnum._();
/// Default value. This value is unused.
static const String blockReasonUnspecified = 'BLOCK_REASON_UNSPECIFIED';
/// Prompt was blocked due to safety reasons. Inspect `safety_ratings` to
/// understand which safety category blocked it.
static const String safety = 'SAFETY';
/// Prompt was blocked due to unknown reasons.
static const String other = 'OTHER';
/// Prompt was blocked due to the terms which are included from the
/// terminology blocklist.
static const String blocklist = 'BLOCKLIST';
/// Prompt was blocked due to prohibited content.
static const String prohibitedContent = 'PROHIBITED_CONTENT';
/// Candidates blocked due to unsafe image generation content.
static const String imageSafety = 'IMAGE_SAFETY';
}
/// Counts the number of tokens in the `prompt` sent to a model.
///
/// Models may tokenize text differently, so each model may return a different
/// `token_count`.
class CountTokensRequest extends ProtoMessage {
CountTokensRequest({this.generateContentRequest, List<Content>? contents})
: contents = contents ?? [];
/// Deserializes a [CountTokensRequest] from its JSON wire form.
factory CountTokensRequest.fromJson(Map<String, Object?> json) =>
CountTokensRequest(
generateContentRequest: _cvt.$1.decode(json['generateContentRequest']),
contents: _cvt.$2.decode(json['contents']),
);
// Field converter record; positions ($1, $2) are shared by fromJson/toJson.
static final _cvt = (
SchemaType(GenerateContentRequest.fromJson),
ListType(SchemaType(Content.fromJson)),
);
/// Optional. The overall input given to the `Model`.
///
/// This includes the prompt as well as other model steering information like
/// [system
/// instructions](https://ai.google.dev/gemini-api/docs/system-instructions),
/// and/or function declarations for [function
/// calling](https://ai.google.dev/gemini-api/docs/function-calling).
/// `Model`s/`Content`s and `generate_content_request`s are mutually
/// exclusive. You can either send `Model` + `Content`s or a
/// `generate_content_request`, but never both.
final GenerateContentRequest? generateContentRequest;
/// Optional. The input given to the model as a prompt.
///
/// This field is ignored when `generate_content_request` is set.
final List<Content> contents;
@override
Map<String, Object?> toJson() => {
if (generateContentRequest != null)
'generateContentRequest': _cvt.$1.encode(generateContentRequest),
'contents': _cvt.$2.encode(contents),
};
}
/// Request containing the `Content` for the model to embed.
class EmbedContentRequest extends ProtoMessage {
EmbedContentRequest({
this.model,
this.title,
this.content,
this.outputDimensionality,
this.taskType,
});
/// Deserializes an [EmbedContentRequest] from its JSON wire form.
factory EmbedContentRequest.fromJson(Map<String, Object?> json) =>
EmbedContentRequest(
model: _cvt.$1.decode(json['model']),
title: _cvt.$1.decode(json['title']),
content: _cvt.$2.decode(json['content']),
outputDimensionality: _cvt.$3.decode(json['outputDimensionality']),
taskType: _cvt.$1.decode(json['taskType']),
);
// Field converter record. The single string converter ($1) is deliberately
// reused for all three string fields (model, title, taskType).
static final _cvt = (
FieldType.stringType,
SchemaType(Content.fromJson),
FieldType.int32Type,
);
/// Required. The model's resource name.
///
/// This serves as an ID for the Model to use. This name should match a model
/// name returned by the `ListModels` method. Format: `models/{model}`
final String? model;
/// Optional. An optional title for the text.
///
/// Only applicable when TaskType is `RETRIEVAL_DOCUMENT`. Note: Specifying a
/// `title` for `RETRIEVAL_DOCUMENT` provides better quality embeddings for
/// retrieval.
final String? title;
/// Required. The content to embed.
///
/// Only the `parts.text` fields will be counted.
final Content? content;
/// Optional. Optional reduced dimension for the output embedding.
///
/// If set, excessive values in the output embedding are truncated from the
/// end. Supported by newer models since 2024 only. You cannot set this value
/// if using the earlier model (`models/embedding-001`).
final int? outputDimensionality;
/// Optional. Optional task type for which the embeddings will be used.
///
/// Can only be set for `models/embedding-001`.
///
/// See [EmbedContentRequestTaskTypeEnum] for valid enum values.
final String? taskType;
@override
Map<String, Object?> toJson() => {
if (model != null) 'model': _cvt.$1.encode(model),
if (title != null) 'title': _cvt.$1.encode(title),
if (content != null) 'content': _cvt.$2.encode(content),
if (outputDimensionality != null)
'outputDimensionality': _cvt.$3.encode(outputDimensionality),
if (taskType != null) 'taskType': _cvt.$1.encode(taskType),
};
}
/// The enum values for [EmbedContentRequest.taskType].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
class EmbedContentRequestTaskTypeEnum {
// Private constructor: this class is only a namespace for the string
// constants below and is never instantiated.
EmbedContentRequestTaskTypeEnum._();
/// Unset value, which will default to one of the other enum values.
static const String taskTypeUnspecified = 'TASK_TYPE_UNSPECIFIED';
/// Specifies the given text is a query in a search/retrieval setting.
static const String retrievalQuery = 'RETRIEVAL_QUERY';
/// Specifies the given text is a document from the corpus being searched.
static const String retrievalDocument = 'RETRIEVAL_DOCUMENT';
/// Specifies the given text will be used for STS.
static const String semanticSimilarity = 'SEMANTIC_SIMILARITY';
/// Specifies that the given text will be classified.
static const String classification = 'CLASSIFICATION';
/// Specifies that the embeddings will be used for clustering.
static const String clustering = 'CLUSTERING';
/// Specifies that the given text will be used for question answering.
static const String questionAnswering = 'QUESTION_ANSWERING';
/// Specifies that the given text will be used for fact verification.
static const String factVerification = 'FACT_VERIFICATION';
}
/// A datatype containing media that is part of a multi-part `Content` message.
///
/// A `Part` consists of data which has an associated datatype. A `Part` can
/// only contain one of the accepted types in `Part.data`. A `Part` must have a
/// fixed IANA MIME type identifying the type and subtype of the media if the
/// `inline_data` field is filled with raw bytes.
class Part extends ProtoMessage {
Part({this.text, this.inlineData});
/// Deserializes a [Part] from its JSON wire form.
factory Part.fromJson(Map<String, Object?> json) => Part(
text: _cvt.$1.decode(json['text']),
inlineData: _cvt.$2.decode(json['inlineData']),
);
// Field converter record; positions ($1, $2) are shared by fromJson/toJson.
static final _cvt = (FieldType.stringType, SchemaType(Blob.fromJson));
/// Inline text.
final String? text;
/// Inline media bytes.
final Blob? inlineData;
@override
Map<String, Object?> toJson() => {
if (text != null) 'text': _cvt.$1.encode(text),
if (inlineData != null) 'inlineData': _cvt.$2.encode(inlineData),
};
}
/// Google search entry point.
class SearchEntryPoint extends ProtoMessage {
SearchEntryPoint({this.sdkBlob, this.renderedContent});
/// Deserializes a [SearchEntryPoint] from its JSON wire form.
factory SearchEntryPoint.fromJson(Map<String, Object?> json) =>
SearchEntryPoint(
sdkBlob: _cvt.$1.decode(json['sdkBlob']),
renderedContent: _cvt.$2.decode(json['renderedContent']),
);
// Field converter record; byteType handles the base64 wire encoding.
static final _cvt = (FieldType.byteType, FieldType.stringType);
/// Optional. Base64 encoded JSON representing array of tuple.
final Uint8List? sdkBlob;
/// Optional. Web content snippet that can be embedded in a web page or an app
/// webview.
final String? renderedContent;
@override
Map<String, Object?> toJson() => {
if (sdkBlob != null) 'sdkBlob': _cvt.$1.encode(sdkBlob),
if (renderedContent != null)
'renderedContent': _cvt.$2.encode(renderedContent),
};
}
/// The base structured datatype containing multi-part content of a message.
///
/// A `Content` includes a `role` field designating the producer of the
/// `Content` and a `parts` field containing multi-part data that contains the
/// content of the message turn.
class Content extends ProtoMessage {
Content({List<Part>? parts, this.role}) : parts = parts ?? [];
/// Deserializes a [Content] from its JSON wire form.
factory Content.fromJson(Map<String, Object?> json) => Content(
parts: _cvt.$1.decode(json['parts']),
role: _cvt.$2.decode(json['role']),
);
// Field converter record; positions ($1, $2) are shared by fromJson/toJson.
static final _cvt = (
ListType(SchemaType(Part.fromJson)),
FieldType.stringType,
);
/// Ordered `Parts` that constitute a single message.
///
/// Parts may have different MIME types.
final List<Part> parts;
/// Optional. The producer of the content.
///
/// Must be either 'user' or 'model'. Useful to set for multi-turn
/// conversations, otherwise can be left blank or unset.
final String? role;
@override
Map<String, Object?> toJson() => {
'parts': _cvt.$1.encode(parts),
if (role != null) 'role': _cvt.$2.encode(role),
};
}
/// Configuration options for model generation and outputs.
///
/// Not all parameters are configurable for every model.
class GenerationConfig extends ProtoMessage {
GenerationConfig({
this.presencePenalty,
List<String>? stopSequences,
this.topP,
this.maxOutputTokens,
this.enableEnhancedCivicAnswers,
this.topK,
this.temperature,
this.candidateCount,
this.responseLogprobs,
this.frequencyPenalty,
this.logprobs,
}) : stopSequences = stopSequences ?? [];
/// Deserializes a [GenerationConfig] from its JSON wire form.
factory GenerationConfig.fromJson(Map<String, Object?> json) =>
GenerationConfig(
presencePenalty: _cvt.$1.decode(json['presencePenalty']),
stopSequences: _cvt.$2.decode(json['stopSequences']),
topP: _cvt.$1.decode(json['topP']),
maxOutputTokens: _cvt.$3.decode(json['maxOutputTokens']),
enableEnhancedCivicAnswers: _cvt.$4.decode(
json['enableEnhancedCivicAnswers'],
),
topK: _cvt.$3.decode(json['topK']),
temperature: _cvt.$1.decode(json['temperature']),
candidateCount: _cvt.$3.decode(json['candidateCount']),
responseLogprobs: _cvt.$4.decode(json['responseLogprobs']),
frequencyPenalty: _cvt.$1.decode(json['frequencyPenalty']),
logprobs: _cvt.$3.decode(json['logprobs']),
);
// Field converter record. Each converter is reused for all fields of its
// type: $1 = double, $2 = string list, $3 = int32, $4 = bool.
static final _cvt = (
FieldType.doubleType,
ListType(FieldType.stringType),
FieldType.int32Type,
FieldType.boolType,
);
/// Optional. Presence penalty applied to the next token's logprobs if the
/// token has already been seen in the response.
///
/// This penalty is binary on/off and not dependent on the number of times the
/// token is used (after the first). Use frequency_penalty for a penalty that
/// increases with each use. A positive penalty will discourage the use of
/// tokens that have already been used in the response, increasing the
/// vocabulary. A negative penalty will encourage the use of tokens that have
/// already been used in the response, decreasing the vocabulary.
final double? presencePenalty;
/// Optional. The set of character sequences (up to 5) that will stop output
/// generation.
///
/// If specified, the API will stop at the first appearance of a
/// `stop_sequence`. The stop sequence will not be included as part of the
/// response.
final List<String> stopSequences;
/// Optional. The maximum cumulative probability of tokens to consider when
/// sampling.
///
/// The model uses combined Top-k and Top-p (nucleus) sampling. Tokens are
/// sorted based on their assigned probabilities so that only the most likely
/// tokens are considered. Top-k sampling directly limits the maximum number
/// of tokens to consider, while Nucleus sampling limits the number of tokens
/// based on the cumulative probability. Note: The default value varies by
/// `Model` and is specified by the `Model.top_p` attribute returned from the
/// `getModel` function. An empty `top_k` attribute indicates that the model
/// doesn't apply top-k sampling and doesn't allow setting `top_k` on
/// requests.
final double? topP;
/// Optional. The maximum number of tokens to include in a response candidate.
///
/// Note: The default value varies by model, see the
/// `Model.output_token_limit` attribute of the `Model` returned from the
/// `getModel` function.
final int? maxOutputTokens;
/// Optional. Enables enhanced civic answers.
///
/// It may not be available for all models.
final bool? enableEnhancedCivicAnswers;
/// Optional. The maximum number of tokens to consider when sampling.
///
/// Gemini models use Top-p (nucleus) sampling or a combination of Top-k and
/// nucleus sampling. Top-k sampling considers the set of `top_k` most
/// probable tokens. Models running with nucleus sampling don't allow top_k
/// setting. Note: The default value varies by `Model` and is specified by
/// the `Model.top_p` attribute returned from the `getModel` function. An empty
/// `top_k` attribute indicates that the model doesn't apply top-k sampling
/// and doesn't allow setting `top_k` on requests.
final int? topK;
/// Optional. Controls the randomness of the output.
///
/// Note: The default value varies by model, see the `Model.temperature`
/// attribute of the `Model` returned from the `getModel` function. Values can
/// range from [0.0, 2.0].
final double? temperature;
/// Optional. Number of generated responses to return.
///
/// Currently, this value can only be set to 1. If unset, this will default to
/// 1.
final int? candidateCount;
/// Optional. If true, export the logprobs results in response.
final bool? responseLogprobs;
/// Optional. Frequency penalty applied to the next token's logprobs,
/// multiplied by the number of times each token has been seen in the
/// response so far.
///
/// A positive penalty will discourage the use of tokens that have already
/// been used, proportional to the number of times the token has been used:
/// The more a token is used, the more difficult it is for the model to use
/// that token again increasing the vocabulary of responses. Caution: A
/// _negative_ penalty will encourage the model to reuse tokens proportional
/// to the number of times the token has been used. Small negative values will
/// reduce the vocabulary of a response. Larger negative values will cause the
/// model to start repeating a common token until it hits the
/// max_output_tokens limit.
final double? frequencyPenalty;
/// Optional. Only valid if response_logprobs=True.
///
/// This sets the number of top logprobs to return at each decoding step in
/// the Candidate.logprobs_result.
final int? logprobs;
// List fields are always emitted; nullable fields only when set.
@override
Map<String, Object?> toJson() => {
if (presencePenalty != null)
'presencePenalty': _cvt.$1.encode(presencePenalty),
'stopSequences': _cvt.$2.encode(stopSequences),
if (topP != null) 'topP': _cvt.$1.encode(topP),
if (maxOutputTokens != null)
'maxOutputTokens': _cvt.$3.encode(maxOutputTokens),
if (enableEnhancedCivicAnswers != null)
'enableEnhancedCivicAnswers': _cvt.$4.encode(enableEnhancedCivicAnswers),
if (topK != null) 'topK': _cvt.$3.encode(topK),
if (temperature != null) 'temperature': _cvt.$1.encode(temperature),
if (candidateCount != null)
'candidateCount': _cvt.$3.encode(candidateCount),
if (responseLogprobs != null)
'responseLogprobs': _cvt.$4.encode(responseLogprobs),
if (frequencyPenalty != null)
'frequencyPenalty': _cvt.$1.encode(frequencyPenalty),
if (logprobs != null) 'logprobs': _cvt.$3.encode(logprobs),
};
}
/// A response candidate generated from the model.
class Candidate extends ProtoMessage {
Candidate({
this.logprobsResult,
List<SafetyRating>? safetyRatings,
this.tokenCount,
this.index,
this.avgLogprobs,
this.content,
this.citationMetadata,
this.groundingMetadata,
this.finishReason,
}) : safetyRatings = safetyRatings ?? [];
/// Deserializes a [Candidate] from its JSON wire form.
factory Candidate.fromJson(Map<String, Object?> json) => Candidate(
logprobsResult: _cvt.$1.decode(json['logprobsResult']),
safetyRatings: _cvt.$2.decode(json['safetyRatings']),
tokenCount: _cvt.$3.decode(json['tokenCount']),
index: _cvt.$3.decode(json['index']),
avgLogprobs: _cvt.$4.decode(json['avgLogprobs']),
content: _cvt.$5.decode(json['content']),
citationMetadata: _cvt.$6.decode(json['citationMetadata']),
groundingMetadata: _cvt.$7.decode(json['groundingMetadata']),
finishReason: _cvt.$8.decode(json['finishReason']),
);
// Field converter record; the int32 converter ($3) is shared by tokenCount
// and index. Positions ($1..$8) must stay in sync with fromJson/toJson.
static final _cvt = (
SchemaType(LogprobsResult.fromJson),
ListType(SchemaType(SafetyRating.fromJson)),
FieldType.int32Type,
FieldType.doubleType,
SchemaType(Content.fromJson),
SchemaType(CitationMetadata.fromJson),
SchemaType(GroundingMetadata.fromJson),
FieldType.stringType,
);
/// Output only. Log-likelihood scores for the response tokens and top tokens
@readonly
final LogprobsResult? logprobsResult;
/// List of ratings for the safety of a response candidate.
///
/// There is at most one rating per category.
final List<SafetyRating> safetyRatings;
/// Output only. Token count for this candidate.
@readonly
final int? tokenCount;
/// Output only. Index of the candidate in the list of response candidates.
@readonly
final int? index;
/// Output only. Average log probability score of the candidate.
@readonly
final double? avgLogprobs;
/// Output only. Generated content returned from the model.
@readonly
final Content? content;
/// Output only. Citation information for model-generated candidate.
///
/// This field may be populated with recitation information for any text
/// included in the `content`. These are passages that are "recited" from
/// copyrighted material in the foundational LLM's training data.
@readonly
final CitationMetadata? citationMetadata;
/// Output only. Grounding metadata for the candidate.
///
/// This field is populated for `GenerateContent` calls.
@readonly
final GroundingMetadata? groundingMetadata;
/// Optional. Output only.
///
/// The reason why the model stopped generating tokens. If empty, the model
/// has not stopped generating tokens.
///
/// See [CandidateFinishReasonEnum] for valid enum values.
@readonly
final String? finishReason;
@override
Map<String, Object?> toJson() => {
if (logprobsResult != null)
'logprobsResult': _cvt.$1.encode(logprobsResult),
'safetyRatings': _cvt.$2.encode(safetyRatings),
if (tokenCount != null) 'tokenCount': _cvt.$3.encode(tokenCount),
if (index != null) 'index': _cvt.$3.encode(index),
if (avgLogprobs != null) 'avgLogprobs': _cvt.$4.encode(avgLogprobs),
if (content != null) 'content': _cvt.$5.encode(content),
if (citationMetadata != null)
'citationMetadata': _cvt.$6.encode(citationMetadata),
if (groundingMetadata != null)
'groundingMetadata': _cvt.$7.encode(groundingMetadata),
if (finishReason != null) 'finishReason': _cvt.$8.encode(finishReason),
};
}
/// The enum values for [Candidate.finishReason].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
class CandidateFinishReasonEnum {
// Private constructor: this class is only a namespace for the string
// constants below and is never instantiated.
CandidateFinishReasonEnum._();
/// Default value. This value is unused.
static const String finishReasonUnspecified = 'FINISH_REASON_UNSPECIFIED';
/// Natural stop point of the model or provided stop sequence.
static const String stop = 'STOP';
/// The maximum number of tokens as specified in the request was reached.
static const String maxTokens = 'MAX_TOKENS';
/// The response candidate content was flagged for safety reasons.
static const String safety = 'SAFETY';
/// The response candidate content was flagged for recitation reasons.
static const String recitation = 'RECITATION';
/// The response candidate content was flagged for using an unsupported
/// language.
static const String language = 'LANGUAGE';
/// Unknown reason.
static const String other = 'OTHER';
/// Token generation stopped because the content contains forbidden terms.
static const String blocklist = 'BLOCKLIST';
/// Token generation stopped for potentially containing prohibited content.
static const String prohibitedContent = 'PROHIBITED_CONTENT';
/// Token generation stopped because the content potentially contains
/// Sensitive Personally Identifiable Information (SPII).
static const String spii = 'SPII';
/// The function call generated by the model is invalid.
static const String malformedFunctionCall = 'MALFORMED_FUNCTION_CALL';
/// Token generation stopped because generated images contain safety
/// violations.
static const String imageSafety = 'IMAGE_SAFETY';
}
/// Metadata returned to client when grounding is enabled.
class GroundingMetadata extends ProtoMessage {
GroundingMetadata({
List<GroundingSupport>? groundingSupports,
List<String>? webSearchQueries,
this.retrievalMetadata,
this.searchEntryPoint,
List<GroundingChunk>? groundingChunks,
}) : groundingSupports = groundingSupports ?? [],
webSearchQueries = webSearchQueries ?? [],
groundingChunks = groundingChunks ?? [];
/// Deserializes a [GroundingMetadata] from its JSON wire form.
factory GroundingMetadata.fromJson(Map<String, Object?> json) =>
GroundingMetadata(
groundingSupports: _cvt.$1.decode(json['groundingSupports']),
webSearchQueries: _cvt.$2.decode(json['webSearchQueries']),
retrievalMetadata: _cvt.$3.decode(json['retrievalMetadata']),
searchEntryPoint: _cvt.$4.decode(json['searchEntryPoint']),
groundingChunks: _cvt.$5.decode(json['groundingChunks']),
);
// Field converter record; positions ($1..$5) are shared by fromJson/toJson.
static final _cvt = (
ListType(SchemaType(GroundingSupport.fromJson)),
ListType(FieldType.stringType),
SchemaType(RetrievalMetadata.fromJson),
SchemaType(SearchEntryPoint.fromJson),
ListType(SchemaType(GroundingChunk.fromJson)),
);
/// List of grounding support.
final List<GroundingSupport> groundingSupports;
/// Web search queries for the following-up web search.
final List<String> webSearchQueries;
/// Metadata related to retrieval in the grounding flow.
final RetrievalMetadata? retrievalMetadata;
/// Optional. Google search entry for the following-up web searches.
final SearchEntryPoint? searchEntryPoint;
/// List of supporting references retrieved from specified grounding source.
final List<GroundingChunk> groundingChunks;
@override
Map<String, Object?> toJson() => {
'groundingSupports': _cvt.$1.encode(groundingSupports),
'webSearchQueries': _cvt.$2.encode(webSearchQueries),
if (retrievalMetadata != null)
'retrievalMetadata': _cvt.$3.encode(retrievalMetadata),
if (searchEntryPoint != null)
'searchEntryPoint': _cvt.$4.encode(searchEntryPoint),
'groundingChunks': _cvt.$5.encode(groundingChunks),
};
}
/// Logprobs Result
class LogprobsResult extends ProtoMessage {
LogprobsResult({
List<LogprobsResultCandidate>? chosenCandidates,
List<TopCandidates>? topCandidates,
}) : chosenCandidates = chosenCandidates ?? [],
topCandidates = topCandidates ?? [];
/// Deserializes a [LogprobsResult] from its JSON wire form.
factory LogprobsResult.fromJson(Map<String, Object?> json) => LogprobsResult(
chosenCandidates: _cvt.$1.decode(json['chosenCandidates']),
topCandidates: _cvt.$2.decode(json['topCandidates']),
);
// Field converter record; positions ($1, $2) are shared by fromJson/toJson.
static final _cvt = (
ListType(SchemaType(LogprobsResultCandidate.fromJson)),
ListType(SchemaType(TopCandidates.fromJson)),
);
/// Length = total number of decoding steps.
///
/// The chosen candidates may or may not be in top_candidates.
final List<LogprobsResultCandidate> chosenCandidates;
/// Length = total number of decoding steps.
final List<TopCandidates> topCandidates;
@override
Map<String, Object?> toJson() => {
'chosenCandidates': _cvt.$1.encode(chosenCandidates),
'topCandidates': _cvt.$2.encode(topCandidates),
};
}
/// Raw media bytes.
///
/// Text should not be sent as raw bytes, use the 'text' field.
class Blob extends ProtoMessage {
Blob({this.data, this.mimeType});
/// Deserializes a [Blob] from its JSON wire form.
factory Blob.fromJson(Map<String, Object?> json) => Blob(
data: _cvt.$1.decode(json['data']),
mimeType: _cvt.$2.decode(json['mimeType']),
);
// Field converter record; byteType handles the base64 wire encoding.
static final _cvt = (FieldType.byteType, FieldType.stringType);
/// Raw bytes for media formats.
final Uint8List? data;
/// The IANA standard MIME type of the source data.
///
/// Examples: - image/png - image/jpeg If an unsupported MIME type is
/// provided, an error will be returned. For a complete list of supported
/// types, see [Supported file
/// formats](https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats).
final String? mimeType;
@override
Map<String, Object?> toJson() => {
if (data != null) 'data': _cvt.$1.encode(data),
if (mimeType != null) 'mimeType': _cvt.$2.encode(mimeType),
};
}
/// Grounding chunk.
class GroundingChunk extends ProtoMessage {
GroundingChunk({this.web});
/// Deserializes a [GroundingChunk] from its JSON wire form.
factory GroundingChunk.fromJson(Map<String, Object?> json) =>
GroundingChunk(web: _cvt.$1.decode(json['web']));
// Single-element converter record (trailing comma keeps it a record type).
static final _cvt = (SchemaType(Web.fromJson),);
/// Grounding chunk from the web.
final Web? web;
@override
Map<String, Object?> toJson() => {
if (web != null) 'web': _cvt.$1.encode(web),
};
}
/// The request message for Operations.CancelOperation.
class CancelOperationRequest extends ProtoMessage {
// This message carries no fields; construction and serialization are
// trivially empty.
CancelOperationRequest();
factory CancelOperationRequest.fromJson(Map<String, Object?> json) =>
CancelOperationRequest();
@override
Map<String, Object?> toJson() => {};
}
/// A collection of source attributions for a piece of content.
class CitationMetadata extends ProtoMessage {
CitationMetadata({List<CitationSource>? citationSources})
: citationSources = citationSources ?? [];
/// Deserializes a [CitationMetadata] from its JSON wire form.
factory CitationMetadata.fromJson(Map<String, Object?> json) =>
CitationMetadata(
citationSources: _cvt.$1.decode(json['citationSources']),
);
// Single-element converter record (trailing comma keeps it a record type).
static final _cvt = (ListType(SchemaType(CitationSource.fromJson)),);
/// Citations to sources for a specific response.
final List<CitationSource> citationSources;
@override
Map<String, Object?> toJson() => {
'citationSources': _cvt.$1.encode(citationSources),
};
}
/// Safety setting, affecting the safety-blocking behavior.
///
/// Passing a safety setting for a category changes the allowed probability that
/// content is blocked.
class SafetySetting extends ProtoMessage {
SafetySetting({this.threshold, this.category});
/// Deserializes a [SafetySetting] from its JSON wire form.
factory SafetySetting.fromJson(Map<String, Object?> json) => SafetySetting(
threshold: _cvt.$1.decode(json['threshold']),
category: _cvt.$1.decode(json['category']),
);
// Single string converter, deliberately shared by both string-valued fields
// (threshold and category).
static final _cvt = (FieldType.stringType,);
/// Required. Controls the probability threshold at which harm is blocked.
///
/// See [SafetySettingThresholdEnum] for valid enum values.
final String? threshold;
/// Required. The category for this setting.
///
/// See [SafetySettingCategoryEnum] for valid enum values.
final String? category;
@override
Map<String, Object?> toJson() => {
if (threshold != null) 'threshold': _cvt.$1.encode(threshold),
if (category != null) 'category': _cvt.$1.encode(category),
};
}
/// The enum values for [SafetySetting.threshold].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
class SafetySettingThresholdEnum {
// Private constructor: this class is only a namespace for the string
// constants below and is never instantiated.
SafetySettingThresholdEnum._();
/// Threshold is unspecified.
static const String harmBlockThresholdUnspecified =
'HARM_BLOCK_THRESHOLD_UNSPECIFIED';
/// Content with NEGLIGIBLE will be allowed.
static const String blockLowAndAbove = 'BLOCK_LOW_AND_ABOVE';
/// Content with NEGLIGIBLE and LOW will be allowed.
static const String blockMediumAndAbove = 'BLOCK_MEDIUM_AND_ABOVE';
/// Content with NEGLIGIBLE, LOW, and MEDIUM will be allowed.
static const String blockOnlyHigh = 'BLOCK_ONLY_HIGH';
/// All content will be allowed.
static const String blockNone = 'BLOCK_NONE';
/// Turn off the safety filter.
static const String off = 'OFF';
}
/// The enum values for [SafetySetting.category].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
class SafetySettingCategoryEnum {
// Private constructor: this class is only a namespace for the string
// constants below and is never instantiated.
SafetySettingCategoryEnum._();
/// Category is unspecified.
static const String harmCategoryUnspecified = 'HARM_CATEGORY_UNSPECIFIED';
/// **PaLM** - Negative or harmful comments targeting identity and/or
/// protected attribute.
static const String harmCategoryDerogatory = 'HARM_CATEGORY_DEROGATORY';
/// **PaLM** - Content that is rude, disrespectful, or profane.
static const String harmCategoryToxicity = 'HARM_CATEGORY_TOXICITY';
/// **PaLM** - Describes scenarios depicting violence against an individual or
/// group, or general descriptions of gore.
static const String harmCategoryViolence = 'HARM_CATEGORY_VIOLENCE';
/// **PaLM** - Contains references to sexual acts or other lewd content.
static const String harmCategorySexual = 'HARM_CATEGORY_SEXUAL';
/// **PaLM** - Promotes unchecked medical advice.
static const String harmCategoryMedical = 'HARM_CATEGORY_MEDICAL';
/// **PaLM** - Dangerous content that promotes, facilitates, or encourages
/// harmful acts.
static const String harmCategoryDangerous = 'HARM_CATEGORY_DANGEROUS';
/// **Gemini** - Harassment content.
static const String harmCategoryHarassment = 'HARM_CATEGORY_HARASSMENT';
/// **Gemini** - Hate speech and content.
static const String harmCategoryHateSpeech = 'HARM_CATEGORY_HATE_SPEECH';
/// **Gemini** - Sexually explicit content.
static const String harmCategorySexuallyExplicit =
'HARM_CATEGORY_SEXUALLY_EXPLICIT';
/// **Gemini** - Dangerous content.
static const String harmCategoryDangerousContent =
'HARM_CATEGORY_DANGEROUS_CONTENT';
/// **Gemini** - Content that may be used to harm civic integrity.
static const String harmCategoryCivicIntegrity =
'HARM_CATEGORY_CIVIC_INTEGRITY';
}
/// A list of floats representing an embedding.
class ContentEmbedding extends ProtoMessage {
ContentEmbedding({List<double>? values}) : values = values ?? [];
/// Deserializes a [ContentEmbedding] from its JSON wire form.
factory ContentEmbedding.fromJson(Map<String, Object?> json) =>
ContentEmbedding(values: _cvt.$1.decode(json['values']));
// Single-element converter record (trailing comma keeps it a record type).
static final _cvt = (ListType(FieldType.doubleType),);
/// The embedding values.
final List<double> values;
@override
Map<String, Object?> toJson() => {'values': _cvt.$1.encode(values)};
}
/// The `Status` type defines a logical error model that is suitable for
/// different programming environments, including REST APIs and RPC APIs.
///
/// It is used by [gRPC](https://github.com/grpc). Each `Status` message
/// contains three pieces of data: error code, error message, and error details.
/// You can find out more about this error model and how to work with it in the
/// [API Design Guide](https://cloud.google.com/apis/design/errors).
class Status extends ProtoMessage {
Status({this.message, this.code, List<Any>? details})
: details = details ?? [];
/// Deserializes a [Status] from its JSON wire form.
factory Status.fromJson(Map<String, Object?> json) => Status(
message: _cvt.$1.decode(json['message']),
code: _cvt.$2.decode(json['code']),
details: _cvt.$3.decode(json['details']),
);
// Field converter record; positions ($1..$3) are shared by fromJson/toJson.
static final _cvt = (
FieldType.stringType,
FieldType.int32Type,
ListType(FieldType.anyType),
);
/// A developer-facing error message, which should be in English.
///
/// Any user-facing error message should be localized and sent in the
/// google.rpc.Status.details field, or localized by the client.
final String? message;
/// The status code, which should be an enum value of google.rpc.Code.
final int? code;
/// A list of messages that carry the error details.
///
/// There is a common set of message types for APIs to use.
final List<Any> details;
@override
Map<String, Object?> toJson() => {
if (message != null) 'message': _cvt.$1.encode(message),
if (code != null) 'code': _cvt.$2.encode(code),
'details': _cvt.$3.encode(details),
};
}
/// Information about a Generative Language Model.
class Model extends ProtoMessage {
  // Codecs used to convert individual fields to and from their JSON form.
  static final _int32 = FieldType.int32Type;
  static final _string = FieldType.stringType;
  static final _double = FieldType.doubleType;
  static final _stringList = ListType(FieldType.stringType);

  /// For Top-k sampling.
  ///
  /// Top-k sampling considers the set of `top_k` most probable tokens. This
  /// value specifies the default used by the backend while making the call to
  /// the model. If empty, the model doesn't use top-k sampling, and `top_k`
  /// isn't allowed as a generation parameter.
  final int? topK;

  /// Maximum number of output tokens available for this model.
  final int? outputTokenLimit;

  /// The model's supported generation methods.
  ///
  /// The corresponding API method names are defined as Pascal case strings,
  /// such as `generateMessage` and `generateContent`.
  final List<String> supportedGenerationMethods;

  /// Maximum number of input tokens allowed for this model.
  final int? inputTokenLimit;

  /// Required. The version number of the model.
  ///
  /// This represents the major version (`1.0` or `1.5`)
  final String? version;

  /// For [Nucleus
  /// sampling](https://ai.google.dev/gemini-api/docs/prompting-strategies#top-p).
  ///
  /// Nucleus sampling considers the smallest set of tokens whose probability
  /// sum is at least `top_p`. This value specifies the default used by the
  /// backend while making the call to the model.
  final double? topP;

  /// A short description of the model.
  final String? description;

  /// The human-readable name of the model.
  ///
  /// E.g. "Gemini 1.5 Flash". The name can be up to 128 characters long and
  /// can consist of any UTF-8 characters.
  final String? displayName;

  /// Required. The resource name of the `Model`.
  ///
  /// Refer to [Model
  /// variants](https://ai.google.dev/gemini-api/docs/models/gemini#model-variations)
  /// for all allowed values. Format: `models/{model}` with a `{model}` naming
  /// convention of:
  /// * "{base_model_id}-{version}" Examples:
  /// * `models/gemini-1.5-flash-001`
  final String? name;

  /// Required. The name of the base model, pass this to the generation
  /// request.
  ///
  /// Examples:
  /// * `gemini-1.5-flash`
  final String? baseModelId;

  /// Controls the randomness of the output.
  ///
  /// Values can range over `[0.0,max_temperature]`, inclusive. A higher value
  /// produces responses that are more varied, while a value closer to `0.0`
  /// typically results in less surprising responses from the model. This
  /// value specifies the default used by the backend while making the call to
  /// the model.
  final double? temperature;

  /// The maximum temperature this model can use.
  final double? maxTemperature;

  // List fields are non-nullable; an omitted argument becomes an empty list.
  Model({
    this.topK,
    this.outputTokenLimit,
    List<String>? supportedGenerationMethods,
    this.inputTokenLimit,
    this.version,
    this.topP,
    this.description,
    this.displayName,
    this.name,
    this.baseModelId,
    this.temperature,
    this.maxTemperature,
  }) : supportedGenerationMethods = supportedGenerationMethods ?? [];

  /// Deserializes a [Model] from its JSON wire representation.
  factory Model.fromJson(Map<String, Object?> json) => Model(
    topK: _int32.decode(json['topK']),
    outputTokenLimit: _int32.decode(json['outputTokenLimit']),
    supportedGenerationMethods: _stringList.decode(
      json['supportedGenerationMethods'],
    ),
    inputTokenLimit: _int32.decode(json['inputTokenLimit']),
    version: _string.decode(json['version']),
    topP: _double.decode(json['topP']),
    description: _string.decode(json['description']),
    displayName: _string.decode(json['displayName']),
    name: _string.decode(json['name']),
    baseModelId: _string.decode(json['baseModelId']),
    temperature: _double.decode(json['temperature']),
    maxTemperature: _double.decode(json['maxTemperature']),
  );

  @override
  Map<String, Object?> toJson() => {
    // Nullable fields are omitted from the map entirely when unset; the
    // list field is always emitted.
    if (topK != null) 'topK': _int32.encode(topK),
    if (outputTokenLimit != null)
      'outputTokenLimit': _int32.encode(outputTokenLimit),
    'supportedGenerationMethods': _stringList.encode(
      supportedGenerationMethods,
    ),
    if (inputTokenLimit != null)
      'inputTokenLimit': _int32.encode(inputTokenLimit),
    if (version != null) 'version': _string.encode(version),
    if (topP != null) 'topP': _double.encode(topP),
    if (description != null) 'description': _string.encode(description),
    if (displayName != null) 'displayName': _string.encode(displayName),
    if (name != null) 'name': _string.encode(name),
    if (baseModelId != null) 'baseModelId': _string.encode(baseModelId),
    if (temperature != null) 'temperature': _double.encode(temperature),
    if (maxTemperature != null)
      'maxTemperature': _double.encode(maxTemperature),
  };
}
/// Grounding support.
class GroundingSupport extends ProtoMessage {
  // Codecs used to convert individual fields to and from their JSON form.
  static final _indexList = ListType(FieldType.int32Type);
  static final _scoreList = ListType(FieldType.doubleType);
  static final _segmentType = SchemaType(Segment.fromJson);

  /// A list of indices (into 'grounding_chunk') specifying the citations
  /// associated with the claim.
  ///
  /// For instance [1,3,4] means that grounding_chunk[1], grounding_chunk[3],
  /// grounding_chunk[4] are the retrieved content attributed to the claim.
  final List<int> groundingChunkIndices;

  /// Confidence score of the support references.
  ///
  /// Ranges from 0 to 1. 1 is the most confident. This list must have the
  /// same size as the grounding_chunk_indices.
  final List<double> confidenceScores;

  /// Segment of the content this support belongs to.
  final Segment? segment;

  // List fields are non-nullable; omitted arguments become empty lists.
  GroundingSupport({
    List<int>? groundingChunkIndices,
    List<double>? confidenceScores,
    this.segment,
  }) : groundingChunkIndices = groundingChunkIndices ?? [],
       confidenceScores = confidenceScores ?? [];

  /// Deserializes a [GroundingSupport] from its JSON wire representation.
  factory GroundingSupport.fromJson(Map<String, Object?> json) =>
      GroundingSupport(
        groundingChunkIndices: _indexList.decode(json['groundingChunkIndices']),
        confidenceScores: _scoreList.decode(json['confidenceScores']),
        segment: _segmentType.decode(json['segment']),
      );

  @override
  Map<String, Object?> toJson() => {
    'groundingChunkIndices': _indexList.encode(groundingChunkIndices),
    'confidenceScores': _scoreList.encode(confidenceScores),
    // Only emitted when present.
    if (segment != null) 'segment': _segmentType.encode(segment),
  };
}
/// A response from `CountTokens`.
///
/// It returns the model's `token_count` for the `prompt`.
class CountTokensResponse extends ProtoMessage {
  // Codec for the single int32 field.
  static final _int32 = FieldType.int32Type;

  /// The number of tokens that the `Model` tokenizes the `prompt` into.
  ///
  /// Always non-negative.
  final int? totalTokens;

  CountTokensResponse({this.totalTokens});

  /// Deserializes a [CountTokensResponse] from its JSON representation.
  factory CountTokensResponse.fromJson(Map<String, Object?> json) =>
      CountTokensResponse(totalTokens: _int32.decode(json['totalTokens']));

  @override
  Map<String, Object?> toJson() => {
    // Omitted entirely when unset.
    if (totalTokens != null) 'totalTokens': _int32.encode(totalTokens),
  };
}
/// The response to a `BatchEmbedContentsRequest`.
class BatchEmbedContentsResponse extends ProtoMessage {
  // Codec for the repeated ContentEmbedding field.
  static final _embeddingList = ListType(
    SchemaType(ContentEmbedding.fromJson),
  );

  /// Output only. The embeddings for each request, in the same order as
  /// provided in the batch request.
  @readonly
  final List<ContentEmbedding> embeddings;

  // The list field is non-nullable; an omitted argument becomes an empty list.
  BatchEmbedContentsResponse({List<ContentEmbedding>? embeddings})
    : embeddings = embeddings ?? [];

  /// Deserializes a [BatchEmbedContentsResponse] from its JSON representation.
  factory BatchEmbedContentsResponse.fromJson(Map<String, Object?> json) =>
      BatchEmbedContentsResponse(
        embeddings: _embeddingList.decode(json['embeddings']),
      );

  @override
  Map<String, Object?> toJson() => {
    'embeddings': _embeddingList.encode(embeddings),
  };
}
/// A generic empty message that you can re-use to avoid defining duplicated
/// empty messages in your APIs.
///
/// A typical example is to use it as the request or the response type of an API
/// method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns
/// (google.protobuf.Empty); }
class Empty extends ProtoMessage {
  Empty();

  // The incoming JSON payload is ignored: an empty message carries no fields.
  factory Empty.fromJson(Map<String, Object?> _) => Empty();

  @override
  Map<String, Object?> toJson() => {};
}
/// Metadata on the generation request's token usage.
class UsageMetadata extends ProtoMessage {
  // Codec shared by all three int32 fields.
  static final _int32 = FieldType.int32Type;

  /// Total number of tokens across all the generated response candidates.
  final int? candidatesTokenCount;

  /// Total token count for the generation request (prompt + response
  /// candidates).
  final int? totalTokenCount;

  /// Number of tokens in the prompt.
  ///
  /// When `cached_content` is set, this is still the total effective prompt
  /// size meaning this includes the number of tokens in the cached content.
  final int? promptTokenCount;

  UsageMetadata({
    this.candidatesTokenCount,
    this.totalTokenCount,
    this.promptTokenCount,
  });

  /// Deserializes a [UsageMetadata] from its JSON representation.
  factory UsageMetadata.fromJson(Map<String, Object?> json) => UsageMetadata(
    candidatesTokenCount: _int32.decode(json['candidatesTokenCount']),
    totalTokenCount: _int32.decode(json['totalTokenCount']),
    promptTokenCount: _int32.decode(json['promptTokenCount']),
  );

  @override
  Map<String, Object?> toJson() => {
    // Each nullable field is omitted from the map entirely when unset.
    if (candidatesTokenCount != null)
      'candidatesTokenCount': _int32.encode(candidatesTokenCount),
    if (totalTokenCount != null)
      'totalTokenCount': _int32.encode(totalTokenCount),
    if (promptTokenCount != null)
      'promptTokenCount': _int32.encode(promptTokenCount),
  };
}
/// Batch request to get embeddings from the model for a list of prompts.
class BatchEmbedContentsRequest extends ProtoMessage {
  // Codec for the repeated EmbedContentRequest field.
  static final _requestList = ListType(
    SchemaType(EmbedContentRequest.fromJson),
  );

  /// Required. Embed requests for the batch.
  ///
  /// The model in each of these requests must match the model specified
  /// `BatchEmbedContentsRequest.model`.
  final List<EmbedContentRequest> requests;

  // The list field is non-nullable; an omitted argument becomes an empty list.
  BatchEmbedContentsRequest({List<EmbedContentRequest>? requests})
    : requests = requests ?? [];

  /// Deserializes a [BatchEmbedContentsRequest] from its JSON representation.
  factory BatchEmbedContentsRequest.fromJson(Map<String, Object?> json) =>
      BatchEmbedContentsRequest(
        requests: _requestList.decode(json['requests']),
      );

  @override
  Map<String, Object?> toJson() => {'requests': _requestList.encode(requests)};
}
/// Safety rating for a piece of content.
///
/// The safety rating contains the category of harm and the harm probability
/// level in that category for a piece of content. Content is classified for
/// safety across a number of harm categories and the probability of the harm
/// classification is included here.
class SafetyRating extends ProtoMessage {
  // Codecs used to convert individual fields to and from their JSON form;
  // the string codec serves both enum-valued fields.
  static final _string = FieldType.stringType;
  static final _bool = FieldType.boolType;

  /// Required. The category for this rating.
  ///
  /// See [SafetyRatingCategoryEnum] for valid enum values.
  final String? category;

  /// Was this content blocked because of this rating?
  final bool? blocked;

  /// Required. The probability of harm for this content.
  ///
  /// See [SafetyRatingProbabilityEnum] for valid enum values.
  final String? probability;

  SafetyRating({this.category, this.blocked, this.probability});

  /// Deserializes a [SafetyRating] from its JSON representation.
  factory SafetyRating.fromJson(Map<String, Object?> json) => SafetyRating(
    category: _string.decode(json['category']),
    blocked: _bool.decode(json['blocked']),
    probability: _string.decode(json['probability']),
  );

  @override
  Map<String, Object?> toJson() => {
    // Each nullable field is omitted from the map entirely when unset.
    if (category != null) 'category': _string.encode(category),
    if (blocked != null) 'blocked': _bool.encode(blocked),
    if (probability != null) 'probability': _string.encode(probability),
  };
}
/// The enum values for [SafetyRating.category].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
///
/// Constants documented as **PaLM** apply to PaLM models; constants documented
/// as **Gemini** apply to Gemini models.
class SafetyRatingCategoryEnum {
  // Private constructor: this class only namespaces the string constants and
  // is never instantiated.
  SafetyRatingCategoryEnum._();
  /// Category is unspecified.
  static const String harmCategoryUnspecified = 'HARM_CATEGORY_UNSPECIFIED';
  /// **PaLM** - Negative or harmful comments targeting identity and/or
  /// protected attribute.
  static const String harmCategoryDerogatory = 'HARM_CATEGORY_DEROGATORY';
  /// **PaLM** - Content that is rude, disrespectful, or profane.
  static const String harmCategoryToxicity = 'HARM_CATEGORY_TOXICITY';
  /// **PaLM** - Describes scenarios depicting violence against an individual or
  /// group, or general descriptions of gore.
  static const String harmCategoryViolence = 'HARM_CATEGORY_VIOLENCE';
  /// **PaLM** - Contains references to sexual acts or other lewd content.
  static const String harmCategorySexual = 'HARM_CATEGORY_SEXUAL';
  /// **PaLM** - Promotes unchecked medical advice.
  static const String harmCategoryMedical = 'HARM_CATEGORY_MEDICAL';
  /// **PaLM** - Dangerous content that promotes, facilitates, or encourages
  /// harmful acts.
  static const String harmCategoryDangerous = 'HARM_CATEGORY_DANGEROUS';
  /// **Gemini** - Harassment content.
  static const String harmCategoryHarassment = 'HARM_CATEGORY_HARASSMENT';
  /// **Gemini** - Hate speech and content.
  static const String harmCategoryHateSpeech = 'HARM_CATEGORY_HATE_SPEECH';
  /// **Gemini** - Sexually explicit content.
  static const String harmCategorySexuallyExplicit =
      'HARM_CATEGORY_SEXUALLY_EXPLICIT';
  /// **Gemini** - Dangerous content.
  static const String harmCategoryDangerousContent =
      'HARM_CATEGORY_DANGEROUS_CONTENT';
  /// **Gemini** - Content that may be used to harm civic integrity.
  static const String harmCategoryCivicIntegrity =
      'HARM_CATEGORY_CIVIC_INTEGRITY';
}
/// The enum values for [SafetyRating.probability].
///
/// Note that the set of enum values is not closed; we may see additional values
/// that are not specified here.
class SafetyRatingProbabilityEnum {
  // Private constructor: this class only namespaces the string constants and
  // is never instantiated.
  SafetyRatingProbabilityEnum._();
  /// Probability is unspecified.
  static const String harmProbabilityUnspecified =
      'HARM_PROBABILITY_UNSPECIFIED';
  /// Content has a negligible chance of being unsafe.
  static const String negligible = 'NEGLIGIBLE';
  /// Content has a low chance of being unsafe.
  static const String low = 'LOW';
  /// Content has a medium chance of being unsafe.
  static const String medium = 'MEDIUM';
  /// Content has a high chance of being unsafe.
  static const String high = 'HIGH';
}
/// Metadata related to retrieval in the grounding flow.
class RetrievalMetadata extends ProtoMessage {
  // Codec for the single double field.
  static final _double = FieldType.doubleType;

  /// Optional. Score indicating how likely information from google search
  /// could help answer the prompt.
  ///
  /// The score is in the range [0, 1], where 0 is the least likely and 1 is
  /// the most likely. This score is only populated when google search
  /// grounding and dynamic retrieval is enabled. It will be compared to the
  /// threshold to determine whether to trigger google search.
  final double? googleSearchDynamicRetrievalScore;

  RetrievalMetadata({this.googleSearchDynamicRetrievalScore});

  /// Deserializes a [RetrievalMetadata] from its JSON representation.
  factory RetrievalMetadata.fromJson(Map<String, Object?> json) =>
      RetrievalMetadata(
        googleSearchDynamicRetrievalScore: _double.decode(
          json['googleSearchDynamicRetrievalScore'],
        ),
      );

  @override
  Map<String, Object?> toJson() => {
    // Omitted entirely when unset.
    if (googleSearchDynamicRetrievalScore != null)
      'googleSearchDynamicRetrievalScore': _double.encode(
        googleSearchDynamicRetrievalScore,
      ),
  };
}
/// Response from `ListModel` containing a paginated list of Models.
class ListModelsResponse extends ProtoMessage {
  // Codecs used to convert individual fields to and from their JSON form.
  static final _string = FieldType.stringType;
  static final _modelList = ListType(SchemaType(Model.fromJson));

  /// A token, which can be sent as `page_token` to retrieve the next page.
  ///
  /// If this field is omitted, there are no more pages.
  final String? nextPageToken;

  /// The returned Models.
  final List<Model> models;

  // The list field is non-nullable; an omitted argument becomes an empty list.
  ListModelsResponse({this.nextPageToken, List<Model>? models})
    : models = models ?? [];

  /// Deserializes a [ListModelsResponse] from its JSON representation.
  factory ListModelsResponse.fromJson(Map<String, Object?> json) =>
      ListModelsResponse(
        nextPageToken: _string.decode(json['nextPageToken']),
        models: _modelList.decode(json['models']),
      );

  @override
  Map<String, Object?> toJson() => {
    // The page token is omitted entirely when unset; the list field is
    // always emitted.
    if (nextPageToken != null) 'nextPageToken': _string.encode(nextPageToken),
    'models': _modelList.encode(models),
  };
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment