From 151eaa40020672c83cf9e2ae1ab8b8678238c2f5 Mon Sep 17 00:00:00 2001 From: Christina Holland Date: Mon, 15 Sep 2025 12:51:51 -0700 Subject: [PATCH] Change documentation tags for hybrid inference from EXPERIMENTAL to public preview --- .changeset/selfish-elephants-sniff.md | 5 ++ common/api-review/ai.api.md | 32 ++++++----- docs-devsite/ai.chromeadapter.md | 20 +++++-- docs-devsite/ai.hybridparams.md | 20 +++++-- .../ai.languagemodelcreatecoreoptions.md | 20 +++++-- docs-devsite/ai.languagemodelcreateoptions.md | 15 ++++-- docs-devsite/ai.languagemodelexpected.md | 15 ++++-- docs-devsite/ai.languagemodelmessage.md | 15 ++++-- .../ai.languagemodelmessagecontent.md | 15 ++++-- docs-devsite/ai.languagemodelpromptoptions.md | 10 +++- docs-devsite/ai.md | 53 ++++++++++++------- docs-devsite/ai.ondeviceparams.md | 15 ++++-- packages/ai/src/types/chrome-adapter.ts | 4 +- packages/ai/src/types/enums.ts | 6 +-- packages/ai/src/types/language-model.ts | 30 ++++------- packages/ai/src/types/requests.ts | 6 +-- 16 files changed, 189 insertions(+), 92 deletions(-) create mode 100644 .changeset/selfish-elephants-sniff.md diff --git a/.changeset/selfish-elephants-sniff.md b/.changeset/selfish-elephants-sniff.md new file mode 100644 index 00000000000..dc791fbb9b8 --- /dev/null +++ b/.changeset/selfish-elephants-sniff.md @@ -0,0 +1,5 @@ +--- +'@firebase/ai': patch +--- + +Change documentation tags for hybrid inference from "EXPERIMENTAL" to "public preview". diff --git a/common/api-review/ai.api.md b/common/api-review/ai.api.md index c1b570a7e05..d4c3c1ad507 100644 --- a/common/api-review/ai.api.md +++ b/common/api-review/ai.api.md @@ -133,6 +133,7 @@ export class BooleanSchema extends Schema { // @public export class ChatSession { + // Warning: (ae-incompatible-release-tags) The symbol "__constructor" is marked as @public, but its signature references "ChromeAdapter" which is marked as @beta constructor(apiSettings: ApiSettings, model: string, chromeAdapter?: ChromeAdapter | undefined, params?: StartChatParams | undefined, requestOptions?: RequestOptions | undefined); getHistory(): Promise<Content[]>; // (undocumented) @@ -145,7 +146,7 @@ export class ChatSession { sendMessageStream(request: string | Array<string | Part>): Promise<GenerateContentStreamResult>; } -// @public +// @beta export interface ChromeAdapter { // @internal (undocumented) countTokens(request: CountTokensRequest): Promise<Response>; @@ -520,6 +521,7 @@ export interface GenerativeContentBlob { // @public export class GenerativeModel extends AIModel { + // Warning: (ae-incompatible-release-tags) The symbol "__constructor" is marked as @public, but its signature references "ChromeAdapter" which is marked as @beta constructor(ai: AI, modelParams: ModelParams, requestOptions?: RequestOptions, chromeAdapter?: ChromeAdapter | undefined); countTokens(request: CountTokensRequest | string | Array<string | Part>): Promise<CountTokensResponse>; generateContent(request: GenerateContentRequest | string | Array<string | Part>): Promise<GenerateContentResult>; @@ -542,6 +544,8 @@ export class GenerativeModel extends AIModel { // @public export function getAI(app?: FirebaseApp, options?: AIOptions): AI; +// Warning: (ae-incompatible-release-tags) The symbol "getGenerativeModel" is marked as @public, but its signature references "HybridParams" which is marked as @beta +// // @public export function getGenerativeModel(ai: AI, modelParams: ModelParams | HybridParams, requestOptions?: RequestOptions): GenerativeModel; @@ -695,7 +699,7 @@ export const HarmSeverity: { // @public export type HarmSeverity = (typeof HarmSeverity)[keyof typeof HarmSeverity]; -// @public +// @beta export interface 
HybridParams { inCloudParams?: ModelParams; mode: InferenceMode; @@ -795,7 +799,7 @@ export interface ImagenSafetySettings { safetyFilterLevel?: ImagenSafetyFilterLevel; } -// @public +// @beta export const InferenceMode: { readonly PREFER_ON_DEVICE: "prefer_on_device"; readonly ONLY_ON_DEVICE: "only_on_device"; @@ -803,7 +807,7 @@ export const InferenceMode: { readonly PREFER_IN_CLOUD: "prefer_in_cloud"; }; -// @public +// @beta export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode]; // @public @@ -841,7 +845,7 @@ export const Language: { // @public export type Language = (typeof Language)[keyof typeof Language]; -// @public +// @beta export interface LanguageModelCreateCoreOptions { // (undocumented) expectedInputs?: LanguageModelExpected[]; @@ -851,7 +855,7 @@ export interface LanguageModelCreateCoreOptions { topK?: number; } -// @public +// @beta export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions { // (undocumented) initialPrompts?: LanguageModelMessage[]; @@ -859,7 +863,7 @@ export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptio signal?: AbortSignal; } -// @public +// @beta export interface LanguageModelExpected { // (undocumented) languages?: string[]; @@ -867,7 +871,7 @@ export interface LanguageModelExpected { type: LanguageModelMessageType; } -// @public +// @beta export interface LanguageModelMessage { // (undocumented) content: LanguageModelMessageContent[]; @@ -875,7 +879,7 @@ export interface LanguageModelMessage { role: LanguageModelMessageRole; } -// @public +// @beta export interface LanguageModelMessageContent { // (undocumented) type: LanguageModelMessageType; @@ -883,16 +887,16 @@ export interface LanguageModelMessageContent { value: LanguageModelMessageContentValue; } -// @public +// @beta export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | BufferSource | string; -// @public +// @beta export type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; -// @public +// @beta export type LanguageModelMessageType = 'text' | 'image' | 'audio'; -// @public +// @beta export interface LanguageModelPromptOptions { // (undocumented) responseConstraint?: object; @@ -1046,7 +1050,7 @@ export interface ObjectSchemaRequest extends SchemaRequest { type: 'object'; } -// @public +// @beta export interface OnDeviceParams { // (undocumented) createOptions?: LanguageModelCreateOptions; diff --git a/docs-devsite/ai.chromeadapter.md b/docs-devsite/ai.chromeadapter.md index e9207614992..e9a7a512503 100644 --- a/docs-devsite/ai.chromeadapter.md +++ b/docs-devsite/ai.chromeadapter.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # ChromeAdapter interface -(EXPERIMENTAL) Defines an inference "backend" that uses Chrome's on-device model, and encapsulates logic for detecting when on-device inference is possible. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Defines an inference "backend" that uses Chrome's on-device model, and encapsulates logic for detecting when on-device inference is possible. These methods should not be called directly by the user. @@ -24,12 +27,15 @@ export interface ChromeAdapter | Method | Description | | --- | --- | -| [generateContent(request)](./ai.chromeadapter.md#chromeadaptergeneratecontent) | Generates content using on-device inference. 
| -| [generateContentStream(request)](./ai.chromeadapter.md#chromeadaptergeneratecontentstream) | Generates a content stream using on-device inference. | -| [isAvailable(request)](./ai.chromeadapter.md#chromeadapterisavailable) | Checks if the on-device model is capable of handling a given request. | +| [generateContent(request)](./ai.chromeadapter.md#chromeadaptergeneratecontent) | (Public Preview) Generates content using on-device inference. | +| [generateContentStream(request)](./ai.chromeadapter.md#chromeadaptergeneratecontentstream) | (Public Preview) Generates a content stream using on-device inference. | +| [isAvailable(request)](./ai.chromeadapter.md#chromeadapterisavailable) | (Public Preview) Checks if the on-device model is capable of handling a given request. | ## ChromeAdapter.generateContent() +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Generates content using on-device inference. This is comparable to [GenerativeModel.generateContent()](./ai.generativemodel.md#generativemodelgeneratecontent) for generating content using in-cloud inference. @@ -52,6 +58,9 @@ Promise<Response> ## ChromeAdapter.generateContentStream() +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Generates a content stream using on-device inference. This is comparable to [GenerativeModel.generateContentStream()](./ai.generativemodel.md#generativemodelgeneratecontentstream) for generating a content stream using in-cloud inference. @@ -74,6 +83,9 @@ Promise<Response> ## ChromeAdapter.isAvailable() +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Checks if the on-device model is capable of handling a given request. Signature: diff --git a/docs-devsite/ai.hybridparams.md b/docs-devsite/ai.hybridparams.md index baf568217d3..558b54abf8d 100644 --- a/docs-devsite/ai.hybridparams.md +++ b/docs-devsite/ai.hybridparams.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # HybridParams interface -(EXPERIMENTAL) Configures hybrid inference. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Configures hybrid inference. Signature: @@ -22,12 +25,15 @@ export interface HybridParams | Property | Type | Description | | --- | --- | --- | -| [inCloudParams](./ai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./ai.modelparams.md#modelparams_interface) | Optional. Specifies advanced params for in-cloud inference. | -| [mode](./ai.hybridparams.md#hybridparamsmode) | [InferenceMode](./ai.md#inferencemode) | Specifies on-device or in-cloud inference. Defaults to prefer on-device. | -| [onDeviceParams](./ai.hybridparams.md#hybridparamsondeviceparams) | [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | Optional. Specifies advanced params for on-device inference. | +| [inCloudParams](./ai.hybridparams.md#hybridparamsincloudparams) | [ModelParams](./ai.modelparams.md#modelparams_interface) | (Public Preview) Optional. Specifies advanced params for in-cloud inference. | +| [mode](./ai.hybridparams.md#hybridparamsmode) | [InferenceMode](./ai.md#inferencemode) | (Public Preview) Specifies on-device or in-cloud inference. Defaults to prefer on-device. 
| +| [onDeviceParams](./ai.hybridparams.md#hybridparamsondeviceparams) | [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | (Public Preview) Optional. Specifies advanced params for on-device inference. | ## HybridParams.inCloudParams +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Optional. Specifies advanced params for in-cloud inference. Signature: @@ -38,6 +44,9 @@ inCloudParams?: ModelParams; ## HybridParams.mode +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Specifies on-device or in-cloud inference. Defaults to prefer on-device. Signature: @@ -48,6 +57,9 @@ mode: InferenceMode; ## HybridParams.onDeviceParams +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Optional. Specifies advanced params for on-device inference. Signature: diff --git a/docs-devsite/ai.languagemodelcreatecoreoptions.md b/docs-devsite/ai.languagemodelcreatecoreoptions.md index 3b221933034..299d5d10603 100644 --- a/docs-devsite/ai.languagemodelcreatecoreoptions.md +++ b/docs-devsite/ai.languagemodelcreatecoreoptions.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # LanguageModelCreateCoreOptions interface -(EXPERIMENTAL) Configures the creation of an on-device language model session. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Configures the creation of an on-device language model session. Signature: @@ -22,12 +25,15 @@ export interface LanguageModelCreateCoreOptions | Property | Type | Description | | --- | --- | --- | -| [expectedInputs](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionsexpectedinputs) | [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface)\[\] | | -| [temperature](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstemperature) | number | | -| [topK](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstopk) | number | | +| [expectedInputs](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionsexpectedinputs) | [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface)\[\] | (Public Preview) | +| [temperature](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstemperature) | number | (Public Preview) | +| [topK](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptionstopk) | number | (Public Preview) | ## LanguageModelCreateCoreOptions.expectedInputs +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript @@ -36,6 +42,9 @@ expectedInputs?: LanguageModelExpected[]; ## LanguageModelCreateCoreOptions.temperature +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript @@ -44,6 +53,9 @@ temperature?: number; ## LanguageModelCreateCoreOptions.topK +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. 
+> + Signature: ```typescript diff --git a/docs-devsite/ai.languagemodelcreateoptions.md b/docs-devsite/ai.languagemodelcreateoptions.md index 5d2ec9c69ad..5949722d7e3 100644 --- a/docs-devsite/ai.languagemodelcreateoptions.md +++ b/docs-devsite/ai.languagemodelcreateoptions.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # LanguageModelCreateOptions interface -(EXPERIMENTAL) Configures the creation of an on-device language model session. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Configures the creation of an on-device language model session. Signature: @@ -23,11 +26,14 @@ export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptio | Property | Type | Description | | --- | --- | --- | -| [initialPrompts](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionsinitialprompts) | [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface)\[\] | | -| [signal](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionssignal) | AbortSignal | | +| [initialPrompts](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionsinitialprompts) | [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface)\[\] | (Public Preview) | +| [signal](./ai.languagemodelcreateoptions.md#languagemodelcreateoptionssignal) | AbortSignal | (Public Preview) | ## LanguageModelCreateOptions.initialPrompts +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript @@ -36,6 +42,9 @@ initialPrompts?: LanguageModelMessage[]; ## LanguageModelCreateOptions.signal +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript diff --git a/docs-devsite/ai.languagemodelexpected.md b/docs-devsite/ai.languagemodelexpected.md index d27e718e1eb..1afe4f86cc0 100644 --- a/docs-devsite/ai.languagemodelexpected.md +++ b/docs-devsite/ai.languagemodelexpected.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # LanguageModelExpected interface -(EXPERIMENTAL) Options for the expected inputs for an on-device language model. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Options for the expected inputs for an on-device language model. Signature: @@ -22,11 +25,14 @@ export interface LanguageModelExpected | Property | Type | Description | | --- | --- | --- | -| [languages](./ai.languagemodelexpected.md#languagemodelexpectedlanguages) | string\[\] | | -| [type](./ai.languagemodelexpected.md#languagemodelexpectedtype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | | +| [languages](./ai.languagemodelexpected.md#languagemodelexpectedlanguages) | string\[\] | (Public Preview) | +| [type](./ai.languagemodelexpected.md#languagemodelexpectedtype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | (Public Preview) | ## LanguageModelExpected.languages +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. 
+> + Signature: ```typescript @@ -35,6 +41,9 @@ languages?: string[]; ## LanguageModelExpected.type +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript diff --git a/docs-devsite/ai.languagemodelmessage.md b/docs-devsite/ai.languagemodelmessage.md index 228a31c8521..5f133e458bc 100644 --- a/docs-devsite/ai.languagemodelmessage.md +++ b/docs-devsite/ai.languagemodelmessage.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # LanguageModelMessage interface -(EXPERIMENTAL) An on-device language model message. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +An on-device language model message. Signature: @@ -22,11 +25,14 @@ export interface LanguageModelMessage | Property | Type | Description | | --- | --- | --- | -| [content](./ai.languagemodelmessage.md#languagemodelmessagecontent) | [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface)\[\] | | -| [role](./ai.languagemodelmessage.md#languagemodelmessagerole) | [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | | +| [content](./ai.languagemodelmessage.md#languagemodelmessagecontent) | [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface)\[\] | (Public Preview) | +| [role](./ai.languagemodelmessage.md#languagemodelmessagerole) | [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | (Public Preview) | ## LanguageModelMessage.content +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript @@ -35,6 +41,9 @@ content: LanguageModelMessageContent[]; ## LanguageModelMessage.role +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript diff --git a/docs-devsite/ai.languagemodelmessagecontent.md b/docs-devsite/ai.languagemodelmessagecontent.md index 71d2ce9919b..0545882c983 100644 --- a/docs-devsite/ai.languagemodelmessagecontent.md +++ b/docs-devsite/ai.languagemodelmessagecontent.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # LanguageModelMessageContent interface -(EXPERIMENTAL) An on-device language model content object. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +An on-device language model content object. 
Signature: @@ -22,11 +25,14 @@ export interface LanguageModelMessageContent | Property | Type | Description | | --- | --- | --- | -| [type](./ai.languagemodelmessagecontent.md#languagemodelmessagecontenttype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | | -| [value](./ai.languagemodelmessagecontent.md#languagemodelmessagecontentvalue) | [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | | +| [type](./ai.languagemodelmessagecontent.md#languagemodelmessagecontenttype) | [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | (Public Preview) | +| [value](./ai.languagemodelmessagecontent.md#languagemodelmessagecontentvalue) | [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | (Public Preview) | ## LanguageModelMessageContent.type +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript @@ -35,6 +41,9 @@ type: LanguageModelMessageType; ## LanguageModelMessageContent.value +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript diff --git a/docs-devsite/ai.languagemodelpromptoptions.md b/docs-devsite/ai.languagemodelpromptoptions.md index 35a22c3d1a6..d681fdec94f 100644 --- a/docs-devsite/ai.languagemodelpromptoptions.md +++ b/docs-devsite/ai.languagemodelpromptoptions.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # LanguageModelPromptOptions interface -(EXPERIMENTAL) Options for an on-device language model prompt. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Options for an on-device language model prompt. Signature: @@ -22,10 +25,13 @@ export interface LanguageModelPromptOptions | Property | Type | Description | | --- | --- | --- | -| [responseConstraint](./ai.languagemodelpromptoptions.md#languagemodelpromptoptionsresponseconstraint) | object | | +| [responseConstraint](./ai.languagemodelpromptoptions.md#languagemodelpromptoptionsresponseconstraint) | object | (Public Preview) | ## LanguageModelPromptOptions.responseConstraint +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript diff --git a/docs-devsite/ai.md b/docs-devsite/ai.md index e4e382256b3..78d82d23800 100644 --- a/docs-devsite/ai.md +++ b/docs-devsite/ai.md @@ -57,7 +57,7 @@ The Firebase AI Web SDK. | [AIOptions](./ai.aioptions.md#aioptions_interface) | Options for initializing the AI service using [getAI()](./ai.md#getai_a94a413). This allows specifying which backend to use (Vertex AI Gemini API or Gemini Developer API) and configuring its specific options (like location for Vertex AI). | | [AudioConversationController](./ai.audioconversationcontroller.md#audioconversationcontroller_interface) | (Public Preview) A controller for managing an active audio conversation. | | [BaseParams](./ai.baseparams.md#baseparams_interface) | Base parameters for a number of methods. 
| -| [ChromeAdapter](./ai.chromeadapter.md#chromeadapter_interface) | (EXPERIMENTAL) Defines an inference "backend" that uses Chrome's on-device model, and encapsulates logic for detecting when on-device inference is possible.These methods should not be called directly by the user. | +| [ChromeAdapter](./ai.chromeadapter.md#chromeadapter_interface) | (Public Preview) Defines an inference "backend" that uses Chrome's on-device model, and encapsulates logic for detecting when on-device inference is possible.These methods should not be called directly by the user. | | [Citation](./ai.citation.md#citation_interface) | A single citation. | | [CitationMetadata](./ai.citationmetadata.md#citationmetadata_interface) | Citation metadata that may be found on a [GenerateContentCandidate](./ai.generatecontentcandidate.md#generatecontentcandidate_interface). | | [CodeExecutionResult](./ai.codeexecutionresult.md#codeexecutionresult_interface) | The results of code execution run by the model. | @@ -93,7 +93,7 @@ The Firebase AI Web SDK. | [GroundingChunk](./ai.groundingchunk.md#groundingchunk_interface) | Represents a chunk of retrieved data that supports a claim in the model's response. This is part of the grounding information provided when grounding is enabled. | | [GroundingMetadata](./ai.groundingmetadata.md#groundingmetadata_interface) | Metadata returned when grounding is enabled.Currently, only Grounding with Google Search is supported (see [GoogleSearchTool](./ai.googlesearchtool.md#googlesearchtool_interface)).Important: If using Grounding with Google Search, you are required to comply with the "Grounding with Google Search" usage requirements for your chosen API provider: [Gemini Developer API](https://ai.google.dev/gemini-api/terms#grounding-with-google-search) or Vertex AI Gemini API (see [Service Terms](https://cloud.google.com/terms/service-terms) section within the Service Specific Terms). | | [GroundingSupport](./ai.groundingsupport.md#groundingsupport_interface) | Provides information about how a specific segment of the model's response is supported by the retrieved grounding chunks. | -| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | (EXPERIMENTAL) Configures hybrid inference. | +| [HybridParams](./ai.hybridparams.md#hybridparams_interface) | (Public Preview) Configures hybrid inference. | | [ImagenGCSImage](./ai.imagengcsimage.md#imagengcsimage_interface) | (Public Preview) An image generated by Imagen, stored in a Cloud Storage for Firebase bucket.This feature is not available yet. | | [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface) | (Public Preview) Configuration options for generating images with Imagen.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images-imagen) for more details. | | [ImagenGenerationResponse](./ai.imagengenerationresponse.md#imagengenerationresponse_interface) | (Public Preview) The response from a request to generate images with Imagen. | @@ -101,12 +101,12 @@ The Firebase AI Web SDK. | [ImagenModelParams](./ai.imagenmodelparams.md#imagenmodelparams_interface) | (Public Preview) Parameters for configuring an [ImagenModel](./ai.imagenmodel.md#imagenmodel_class). | | [ImagenSafetySettings](./ai.imagensafetysettings.md#imagensafetysettings_interface) | (Public Preview) Settings for controlling the aggressiveness of filtering out sensitive content.See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details. 
| | [InlineDataPart](./ai.inlinedatapart.md#inlinedatapart_interface) | Content part interface if the part represents an image. | -| [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface) | (EXPERIMENTAL) Configures the creation of an on-device language model session. | -| [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | (EXPERIMENTAL) Configures the creation of an on-device language model session. | -| [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface) | (EXPERIMENTAL) Options for the expected inputs for an on-device language model. | -| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | (EXPERIMENTAL) An on-device language model message. | -| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | (EXPERIMENTAL) An on-device language model content object. | -| [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | (EXPERIMENTAL) Options for an on-device language model prompt. | +| [LanguageModelCreateCoreOptions](./ai.languagemodelcreatecoreoptions.md#languagemodelcreatecoreoptions_interface) | (Public Preview) Configures the creation of an on-device language model session. | +| [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | (Public Preview) Configures the creation of an on-device language model session. | +| [LanguageModelExpected](./ai.languagemodelexpected.md#languagemodelexpected_interface) | (Public Preview) Options for the expected inputs for an on-device language model. | +| [LanguageModelMessage](./ai.languagemodelmessage.md#languagemodelmessage_interface) | (Public Preview) An on-device language model message. | +| [LanguageModelMessageContent](./ai.languagemodelmessagecontent.md#languagemodelmessagecontent_interface) | (Public Preview) An on-device language model content object. | +| [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | (Public Preview) Options for an on-device language model prompt. | | [LiveGenerationConfig](./ai.livegenerationconfig.md#livegenerationconfig_interface) | (Public Preview) Configuration parameters used by [LiveGenerativeModel](./ai.livegenerativemodel.md#livegenerativemodel_class) to control live content generation. | | [LiveModelParams](./ai.livemodelparams.md#livemodelparams_interface) | (Public Preview) Params passed to [getLiveGenerativeModel()](./ai.md#getlivegenerativemodel_f2099ac). | | [LiveServerContent](./ai.liveservercontent.md#liveservercontent_interface) | (Public Preview) An incremental content update from the model. | @@ -115,7 +115,7 @@ The Firebase AI Web SDK. | [ModalityTokenCount](./ai.modalitytokencount.md#modalitytokencount_interface) | Represents token counting info for a single modality. | | [ModelParams](./ai.modelparams.md#modelparams_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). | | [ObjectSchemaRequest](./ai.objectschemarequest.md#objectschemarequest_interface) | Interface for JSON parameters in a schema of [SchemaType](./ai.md#schematype) "object" when not using the Schema.object() helper. | -| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | (EXPERIMENTAL) Encapsulates configuration for on-device inference. 
| +| [OnDeviceParams](./ai.ondeviceparams.md#ondeviceparams_interface) | (Public Preview) Encapsulates configuration for on-device inference. | | [PrebuiltVoiceConfig](./ai.prebuiltvoiceconfig.md#prebuiltvoiceconfig_interface) | (Public Preview) Configuration for a pre-built voice. | | [PromptFeedback](./ai.promptfeedback.md#promptfeedback_interface) | If the prompt was blocked, this will be populated with blockReason and the relevant safetyRatings. | | [RequestOptions](./ai.requestoptions.md#requestoptions_interface) | Params passed to [getGenerativeModel()](./ai.md#getgenerativemodel_c63f46a). | @@ -157,7 +157,7 @@ The Firebase AI Web SDK. | [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. | | [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. | | [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence, sexual, derogatory, and toxic). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. | -| [InferenceMode](./ai.md#inferencemode) | (EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. | +| [InferenceMode](./ai.md#inferencemode) | (Public Preview) Determines whether inference happens on-device or in-cloud. | | [Language](./ai.md#language) | The programming language of the code. | | [LiveResponseType](./ai.md#liveresponsetype) | (Public Preview) The types of responses that can be returned by [LiveSession.receive()](./ai.livesession.md#livesessionreceive). | | [Modality](./ai.md#modality) | Content part modality. | @@ -183,11 +183,11 @@ The Firebase AI Web SDK. | [ImagenAspectRatio](./ai.md#imagenaspectratio) | (Public Preview) Aspect ratios for Imagen images.To specify an aspect ratio for generated images, set the aspectRatio property in your [ImagenGenerationConfig](./ai.imagengenerationconfig.md#imagengenerationconfig_interface).See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) for more details and examples of the supported aspect ratios. | | [ImagenPersonFilterLevel](./ai.md#imagenpersonfilterlevel) | (Public Preview) A filter level controlling whether generation of images containing people or faces is allowed.See the personGeneration documentation for more details. 
| | [ImagenSafetyFilterLevel](./ai.md#imagensafetyfilterlevel) | (Public Preview) A filter level controlling how aggressively to filter sensitive content.Text prompts provided as inputs and images (generated or uploaded) through Imagen on Vertex AI are assessed against a list of safety filters, which include 'harmful categories' (for example, violence, sexual, derogatory, and toxic). This filter level controls how aggressively to filter out potentially harmful content from responses. See the [documentation](http://firebase.google.com/docs/vertex-ai/generate-images) and the [Responsible AI and usage guidelines](https://cloud.google.com/vertex-ai/generative-ai/docs/image/responsible-ai-imagen#safety-filters) for more details. | -| [InferenceMode](./ai.md#inferencemode) | (EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. | +| [InferenceMode](./ai.md#inferencemode) | (Public Preview) Determines whether inference happens on-device or in-cloud. | | [Language](./ai.md#language) | The programming language of the code. | -| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | (EXPERIMENTAL) Content formats that can be provided as on-device message content. | -| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | (EXPERIMENTAL) Allowable roles for on-device language model usage. | -| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | (EXPERIMENTAL) Allowable types for on-device language model messages. | +| [LanguageModelMessageContentValue](./ai.md#languagemodelmessagecontentvalue) | (Public Preview) Content formats that can be provided as on-device message content. | +| [LanguageModelMessageRole](./ai.md#languagemodelmessagerole) | (Public Preview) Allowable roles for on-device language model usage. | +| [LanguageModelMessageType](./ai.md#languagemodelmessagetype) | (Public Preview) Allowable types for on-device language model messages. | | [LiveResponseType](./ai.md#liveresponsetype) | (Public Preview) The types of responses that can be returned by [LiveSession.receive()](./ai.livesession.md#livesessionreceive). This is a property on all messages that can be used for type narrowing. This property is not returned by the server, it is assigned to a server message object once it's parsed. | | [Modality](./ai.md#modality) | Content part modality. | | [Outcome](./ai.md#outcome) | Represents the result of the code execution. | @@ -631,7 +631,10 @@ ImagenSafetyFilterLevel: { ## InferenceMode -(EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Determines whether inference happens on-device or in-cloud. PREFER\_ON\_DEVICE: Attempt to make inference calls using an on-device model. If on-device inference is not available, the SDK will fall back to using a cloud-hosted model.
ONLY\_ON\_DEVICE: Only attempt to make inference calls using an on-device model. The SDK will not fall back to a cloud-hosted model. If on-device inference is not available, inference methods will throw.
ONLY\_IN\_CLOUD: Only attempt to make inference calls using a cloud-hosted model. The SDK will not fall back to an on-device model.
PREFER\_IN\_CLOUD: Attempt to make inference calls to a cloud-hosted model. If not available, the SDK will fall back to an on-device model. @@ -900,7 +903,10 @@ export type ImagenSafetyFilterLevel = (typeof ImagenSafetyFilterLevel)[keyof typ ## InferenceMode -(EXPERIMENTAL) Determines whether inference happens on-device or in-cloud. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Determines whether inference happens on-device or in-cloud. Signature: @@ -920,7 +926,10 @@ export type Language = (typeof Language)[keyof typeof Language]; ## LanguageModelMessageContentValue -(EXPERIMENTAL) Content formats that can be provided as on-device message content. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Content formats that can be provided as on-device message content. Signature: @@ -930,7 +939,10 @@ export type LanguageModelMessageContentValue = ImageBitmapSource | AudioBuffer | ## LanguageModelMessageRole -(EXPERIMENTAL) Allowable roles for on-device language model usage. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Allowable roles for on-device language model usage. Signature: @@ -940,7 +952,10 @@ export type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; ## LanguageModelMessageType -(EXPERIMENTAL) Allowable types for on-device language model messages. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Allowable types for on-device language model messages. Signature: diff --git a/docs-devsite/ai.ondeviceparams.md b/docs-devsite/ai.ondeviceparams.md index bce68ff8174..363427149f9 100644 --- a/docs-devsite/ai.ondeviceparams.md +++ b/docs-devsite/ai.ondeviceparams.md @@ -10,7 +10,10 @@ https://github.com/firebase/firebase-js-sdk {% endcomment %} # OnDeviceParams interface -(EXPERIMENTAL) Encapsulates configuration for on-device inference. +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + +Encapsulates configuration for on-device inference. Signature: @@ -22,11 +25,14 @@ export interface OnDeviceParams | Property | Type | Description | | --- | --- | --- | -| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | | -| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | | +| [createOptions](./ai.ondeviceparams.md#ondeviceparamscreateoptions) | [LanguageModelCreateOptions](./ai.languagemodelcreateoptions.md#languagemodelcreateoptions_interface) | (Public Preview) | +| [promptOptions](./ai.ondeviceparams.md#ondeviceparamspromptoptions) | [LanguageModelPromptOptions](./ai.languagemodelpromptoptions.md#languagemodelpromptoptions_interface) | (Public Preview) | ## OnDeviceParams.createOptions +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. 
+> + Signature: ```typescript @@ -35,6 +41,9 @@ createOptions?: LanguageModelCreateOptions; ## OnDeviceParams.promptOptions +> This API is provided as a preview for developers and may change based on feedback that we receive. Do not use this API in a production environment. +> + Signature: ```typescript diff --git a/packages/ai/src/types/chrome-adapter.ts b/packages/ai/src/types/chrome-adapter.ts index 9ec0dc2a0ab..fc33325217f 100644 --- a/packages/ai/src/types/chrome-adapter.ts +++ b/packages/ai/src/types/chrome-adapter.ts @@ -18,13 +18,13 @@ import { CountTokensRequest, GenerateContentRequest } from './requests'; /** - * (EXPERIMENTAL) Defines an inference "backend" that uses Chrome's on-device model, + * Defines an inference "backend" that uses Chrome's on-device model, * and encapsulates logic for detecting when on-device inference is * possible. * * These methods should not be called directly by the user. * - * @public + * @beta */ export interface ChromeAdapter { /** diff --git a/packages/ai/src/types/enums.ts b/packages/ai/src/types/enums.ts index 177dde296b7..997df1265b0 100644 --- a/packages/ai/src/types/enums.ts +++ b/packages/ai/src/types/enums.ts @@ -345,7 +345,6 @@ export type ResponseModality = (typeof ResponseModality)[keyof typeof ResponseModality]; /** - * (EXPERIMENTAL) * Determines whether inference happens on-device or in-cloud. * * @remarks @@ -364,7 +363,7 @@ export type ResponseModality = * cloud-hosted model. If not available, the SDK will fall back to an * on-device model. * - * @public + * @beta */ export const InferenceMode = { 'PREFER_ON_DEVICE': 'prefer_on_device', @@ -374,10 +373,9 @@ export const InferenceMode = { } as const; /** - * (EXPERIMENTAL) * Determines whether inference happens on-device or in-cloud. * - * @public + * @beta */ export type InferenceMode = (typeof InferenceMode)[keyof typeof InferenceMode]; diff --git a/packages/ai/src/types/language-model.ts b/packages/ai/src/types/language-model.ts index 4157e6d05e6..9ac4c7202e1 100644 --- a/packages/ai/src/types/language-model.ts +++ b/packages/ai/src/types/language-model.ts @@ -50,9 +50,8 @@ export enum Availability { } /** - * (EXPERIMENTAL) * Configures the creation of an on-device language model session. - * @public + * @beta */ export interface LanguageModelCreateCoreOptions { topK?: number; @@ -61,9 +60,8 @@ export interface LanguageModelCreateCoreOptions { } /** - * (EXPERIMENTAL) * Configures the creation of an on-device language model session. - * @public + * @beta */ export interface LanguageModelCreateOptions extends LanguageModelCreateCoreOptions { @@ -72,9 +70,8 @@ export interface LanguageModelCreateOptions } /** - * (EXPERIMENTAL) * Options for an on-device language model prompt. - * @public + * @beta */ export interface LanguageModelPromptOptions { responseConstraint?: object; @@ -82,25 +79,22 @@ export interface LanguageModelPromptOptions { } /** - * (EXPERIMENTAL) * Options for the expected inputs for an on-device language model. - * @public + * @beta */ export interface LanguageModelExpected { type: LanguageModelMessageType; languages?: string[]; } /** - * (EXPERIMENTAL) * An on-device language model prompt. - * @public + * @beta */ export type LanguageModelPrompt = LanguageModelMessage[]; /** - * (EXPERIMENTAL) * An on-device language model message. - * @public + * @beta */ export interface LanguageModelMessage { role: LanguageModelMessageRole; @@ -108,9 +102,8 @@ export interface LanguageModelMessage { } /** - * (EXPERIMENTAL) * An on-device language model content object. 
- * @public + * @beta */ export interface LanguageModelMessageContent { type: LanguageModelMessageType; @@ -118,23 +111,20 @@ export interface LanguageModelMessageContent { } /** - * (EXPERIMENTAL) * Allowable roles for on-device language model usage. - * @public + * @beta */ export type LanguageModelMessageRole = 'system' | 'user' | 'assistant'; /** - * (EXPERIMENTAL) * Allowable types for on-device language model messages. - * @public + * @beta */ export type LanguageModelMessageType = 'text' | 'image' | 'audio'; /** - * (EXPERIMENTAL) * Content formats that can be provided as on-device message content. - * @public + * @beta */ export type LanguageModelMessageContentValue = | ImageBitmapSource diff --git a/packages/ai/src/types/requests.ts b/packages/ai/src/types/requests.ts index 0ce87d0c8da..eea03e55fe4 100644 --- a/packages/ai/src/types/requests.ts +++ b/packages/ai/src/types/requests.ts @@ -356,10 +356,9 @@ export interface FunctionCallingConfig { } /** - * (EXPERIMENTAL) * Encapsulates configuration for on-device inference. * - * @public + * @beta */ export interface OnDeviceParams { createOptions?: LanguageModelCreateOptions; @@ -367,9 +366,8 @@ export interface OnDeviceParams { } /** - * (EXPERIMENTAL) * Configures hybrid inference. - * @public + * @beta */ export interface HybridParams { /**
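
For reviewers, a minimal usage sketch of the surface this patch re-tags as public preview, assembled only from the APIs referenced above (`getAI`, `getGenerativeModel`, `HybridParams`, `InferenceMode`, `OnDeviceParams`). The Firebase config values and the `gemini-2.5-flash` model name are placeholders rather than part of this change, and reading the reply via `result.response.text()` uses the SDK's existing public API, not anything introduced here:

```typescript
// Hybrid inference through the "@beta" (public preview) surface described in this patch.
// Config values and the model name below are placeholders; substitute real project values.
import { initializeApp } from 'firebase/app';
import { getAI, getGenerativeModel, InferenceMode } from 'firebase/ai';

const app = initializeApp({
  apiKey: 'YOUR_API_KEY',       // placeholder
  projectId: 'your-project-id', // placeholder
  appId: 'YOUR_APP_ID'          // placeholder
});

const ai = getAI(app);

// HybridParams: prefer Chrome's on-device model and fall back to the in-cloud model
// when ChromeAdapter reports that on-device inference is not available.
const model = getGenerativeModel(ai, {
  mode: InferenceMode.PREFER_ON_DEVICE,
  inCloudParams: { model: 'gemini-2.5-flash' },  // ModelParams used for the in-cloud fallback
  onDeviceParams: {
    createOptions: { temperature: 1.0, topK: 3 } // LanguageModelCreateCoreOptions
  }
});

async function run(): Promise<void> {
  const result = await model.generateContent('Summarize hybrid inference in one sentence.');
  console.log(result.response.text());
}

run().catch(console.error);
```

Swapping `mode` to `InferenceMode.ONLY_ON_DEVICE`, `ONLY_IN_CLOUD`, or `PREFER_IN_CLOUD` changes only the fallback behavior documented in the `InferenceMode` section; the rest of the call site stays the same.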