Эх сурвалжийг харах

fix(ai): Fix broken links and update docs per cl (#15399)

Daymon 5 сар өмнө
parent
commit
455d291111

+ 3 - 0
FirebaseAI/CHANGELOG.md

@@ -1,3 +1,6 @@
+# Unreleased
+- [fixed] Fixed various links in the Live API doc comments not mapping correctly.
+
 # 12.4.0
 # 12.4.0
 - [feature] Added support for the URL context tool, which allows the model to access content
 - [feature] Added support for the URL context tool, which allows the model to access content
   from provided public web URLs to inform and enhance its responses. (#15221)
   from provided public web URLs to inform and enhance its responses. (#15221)

+ 9 - 12
FirebaseAI/Sources/FirebaseAI.swift

@@ -63,7 +63,7 @@ public final class FirebaseAI: Sendable {
   /// guidance on choosing an appropriate model for your use case.
   /// guidance on choosing an appropriate model for your use case.
   ///
   ///
   /// - Parameters:
   /// - Parameters:
-  ///   - modelName: The name of the model to use, for example `"gemini-1.5-flash"`; see
+  ///   - modelName: The name of the model to use; see
   ///     [available model names
   ///     [available model names
   ///     ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
   ///     ](https://firebase.google.com/docs/vertex-ai/gemini-models#available-model-names) for a
   ///     list of supported model names.
   ///     list of supported model names.
@@ -106,12 +106,11 @@ public final class FirebaseAI: Sendable {
 
 
   /// Initializes an ``ImagenModel`` with the given parameters.
   /// Initializes an ``ImagenModel`` with the given parameters.
   ///
   ///
-  /// > Important: Only Imagen 3 models (named `imagen-3.0-*`) are supported.
+  /// - Note: Refer to [Imagen models](https://firebase.google.com/docs/vertex-ai/models) for
+  /// guidance on choosing an appropriate model for your use case.
   ///
   ///
   /// - Parameters:
   /// - Parameters:
-  ///   - modelName: The name of the Imagen 3 model to use, for example `"imagen-3.0-generate-002"`;
-  ///     see [model versions](https://firebase.google.com/docs/vertex-ai/models) for a list of
-  ///     supported Imagen 3 models.
+  ///   - modelName: The name of the Imagen 3 model to use.
   ///   - generationConfig: Configuration options for generating images with Imagen.
   ///   - generationConfig: Configuration options for generating images with Imagen.
   ///   - safetySettings: Settings describing what types of potentially harmful content your model
   ///   - safetySettings: Settings describing what types of potentially harmful content your model
   ///     should allow.
   ///     should allow.
@@ -138,18 +137,16 @@ public final class FirebaseAI: Sendable {
 
 
   /// **[Public Preview]** Initializes a ``LiveGenerativeModel`` with the given parameters.
   /// **[Public Preview]** Initializes a ``LiveGenerativeModel`` with the given parameters.
   ///
   ///
+  /// - Note: Refer to [the Firebase docs on the Live
+  /// API](https://firebase.google.com/docs/ai-logic/live-api#models-that-support-capability) for
+  /// guidance on choosing an appropriate model for your use case.
+  ///
   /// > Warning: Using the Firebase AI Logic SDKs with the Gemini Live API is in Public
   /// > Warning: Using the Firebase AI Logic SDKs with the Gemini Live API is in Public
   /// Preview, which means that the feature is not subject to any SLA or deprecation policy and
   /// Preview, which means that the feature is not subject to any SLA or deprecation policy and
   /// could change in backwards-incompatible ways.
   /// could change in backwards-incompatible ways.
   ///
   ///
-  /// > Important: Only models that support the Gemini Live API (typically containing `live-*` in
-  /// the name) are supported.
-  ///
   /// - Parameters:
   /// - Parameters:
-  ///   - modelName: The name of the model to use, for example
-  ///     `"gemini-live-2.5-flash-preview"`;
-  ///     see [model versions](https://firebase.google.com/docs/ai-logic/live-api?api=dev#models-that-support-capability)
-  ///     for a list of supported models.
+  ///   - modelName: The name of the model to use.
   ///   - generationConfig: The content generation parameters your model should use.
   ///   - generationConfig: The content generation parameters your model should use.
   ///   - tools: A list of ``Tool`` objects that the model may use to generate the next response.
   ///   - tools: A list of ``Tool`` objects that the model may use to generate the next response.
   ///   - toolConfig: Tool configuration for any ``Tool`` specified in the request.
   ///   - toolConfig: Tool configuration for any ``Tool`` specified in the request.

+ 1 - 1
FirebaseAI/Sources/GenerativeModel.swift

@@ -59,7 +59,7 @@ public final class GenerativeModel: Sendable {
   /// Initializes a new remote model with the given parameters.
   /// Initializes a new remote model with the given parameters.
   ///
   ///
   /// - Parameters:
   /// - Parameters:
-  ///   - modelName: The name of the model, for example "gemini-2.0-flash".
+  ///   - modelName: The name of the model.
   ///   - modelResourceName: The model resource name corresponding with `modelName` in the backend.
   ///   - modelResourceName: The model resource name corresponding with `modelName` in the backend.
   ///     The form depends on the backend and will be one of:
   ///     The form depends on the backend and will be one of:
   ///       - Vertex AI via Firebase AI SDK:
   ///       - Vertex AI via Firebase AI SDK:

+ 4 - 3
FirebaseAI/Sources/Types/Public/Live/LiveGenerationConfig.swift

@@ -107,13 +107,14 @@ public struct LiveGenerationConfig: Sendable {
   ///     the model.
   ///     the model.
   ///
   ///
   ///     Input transcripts are the model's interpretation of audio data sent to it, and they are
   ///     Input transcripts are the model's interpretation of audio data sent to it, and they are
-  ///     populated in model responses via ``LiveServerContent``. When this field is set to `nil`,
-  ///     input transcripts are not populated in model responses.
+  ///     populated in model responses via ``LiveServerContent/inputAudioTranscription``. When this
+  ///     field is set to `nil`, input transcripts are not populated in model responses.
   ///   - outputAudioTranscription: Configures (and enables) output transcriptions when streaming to
   ///   - outputAudioTranscription: Configures (and enables) output transcriptions when streaming to
   ///     the model.
   ///     the model.
   ///
   ///
   ///     Output transcripts are text representations of the audio the model is sending to the
   ///     Output transcripts are text representations of the audio the model is sending to the
-  ///     client, and they are populated in model responses via ``LiveServerContent``. When this
+  ///     client, and they are populated in model responses via
+  ///     ``LiveServerContent/outputAudioTranscription``. When this
   ///     field is set to `nil`, output transcripts are not populated in model responses.
   ///     field is set to `nil`, output transcripts are not populated in model responses.
   ///
   ///
   ///     > Important: Transcripts are independent of the model turn. This means transcripts may
   ///     > Important: Transcripts are independent of the model turn. This means transcripts may

+ 1 - 1
FirebaseAI/Sources/Types/Public/Live/LiveGenerativeModel.swift

@@ -17,7 +17,7 @@ import Foundation
 /// A multimodal model (like Gemini) capable of real-time content generation based on
 /// A multimodal model (like Gemini) capable of real-time content generation based on
 /// various input types, supporting bidirectional streaming.
 /// various input types, supporting bidirectional streaming.
 ///
 ///
-/// You can create a new session via ``connect()``.
+/// You can create a new session via ``LiveGenerativeModel/connect()``.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public final class LiveGenerativeModel {
 public final class LiveGenerativeModel {

+ 10 - 8
FirebaseAI/Sources/Types/Public/Live/LiveServerContent.swift

@@ -45,13 +45,15 @@ public struct LiveServerContent: Sendable {
   /// The model has finished _generating_ data for the current turn.
   /// The model has finished _generating_ data for the current turn.
   ///
   ///
   /// For realtime playback, there will be a delay between when the model finishes generating
   /// For realtime playback, there will be a delay between when the model finishes generating
-  /// content and the client has finished playing back the generated content. `generationComplete`
-  /// indicates that the model is done generating data, while `isTurnComplete` indicates the model
-  /// is waiting for additional client messages. Sending a message during this delay may cause a
-  /// `wasInterrupted` message to be sent.
+  /// content and the client has finished playing back the generated content.
+  /// ``LiveServerContent/isGenerationComplete`` indicates that the model is done generating data,
+  /// while ``LiveServerContent/isTurnComplete`` indicates the model is waiting for additional
+  /// client messages. Sending a message during this delay may cause a
+  /// ``LiveServerContent/wasInterrupted`` message to be sent.
   ///
   ///
-  ///  Note that if the model `wasInterrupted`, this will not be set. The model will go from
-  /// `wasInterrupted` -> `turnComplete`.
+  /// > Important: If the model ``LiveServerContent/wasInterrupted``, this will not be set. The
+  /// > model will go from ``LiveServerContent/wasInterrupted`` ->
+  /// > ``LiveServerContent/isTurnComplete``.
   public var isGenerationComplete: Bool { serverContent.generationComplete ?? false }
   public var isGenerationComplete: Bool { serverContent.generationComplete ?? false }
 
 
   /// Metadata specifying the sources used to ground generated content.
   /// Metadata specifying the sources used to ground generated content.
@@ -60,7 +62,7 @@ public struct LiveServerContent: Sendable {
   /// The model's interpretation of what the client said in an audio message.
   /// The model's interpretation of what the client said in an audio message.
   ///
   ///
   /// This field is only populated when an ``AudioTranscriptionConfig`` is provided to
   /// This field is only populated when an ``AudioTranscriptionConfig`` is provided to
-  /// ``LiveGenerationConfig``.
+  /// the `inputAudioTranscription` field in ``LiveGenerationConfig``.
   public var inputAudioTranscription: LiveAudioTranscription? {
   public var inputAudioTranscription: LiveAudioTranscription? {
     serverContent.inputTranscription.map { LiveAudioTranscription($0) }
     serverContent.inputTranscription.map { LiveAudioTranscription($0) }
   }
   }
@@ -68,7 +70,7 @@ public struct LiveServerContent: Sendable {
   /// Transcription matching the model's audio response.
   /// Transcription matching the model's audio response.
   ///
   ///
   /// This field is only populated when an ``AudioTranscriptionConfig`` is provided to
   /// This field is only populated when an ``AudioTranscriptionConfig`` is provided to
-  /// ``LiveGenerationConfig``.
   ///   the `outputAudioTranscription` field in ``LiveGenerationConfig``.
   ///
   ///
   ///   > Important: Transcripts are independent of the model turn. This means transcripts may
   ///   > Important: Transcripts are independent of the model turn. This means transcripts may
   /// > come earlier or later than when the model sends the corresponding audio responses.
   /// > come earlier or later than when the model sends the corresponding audio responses.

+ 2 - 2
FirebaseAI/Sources/Types/Public/Live/LiveServerToolCall.swift

@@ -14,8 +14,8 @@
 
 
 /// Request for the client to execute the provided ``functionCalls``.
 /// Request for the client to execute the provided ``functionCalls``.
 ///
 ///
-/// The client should return matching ``FunctionResponsePart``, where the `functionId` fields
-/// correspond to individual ``FunctionCallPart``s.
+/// The client should return matching ``FunctionResponsePart``, where the
+/// ``FunctionResponsePart/functionId`` fields correspond to individual ``FunctionCallPart``s.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public struct LiveServerToolCall: Sendable {
 public struct LiveServerToolCall: Sendable {

+ 2 - 2
FirebaseAI/Sources/Types/Public/Live/LiveServerToolCallCancellation.swift

@@ -20,8 +20,8 @@
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public struct LiveServerToolCallCancellation: Sendable {
 public struct LiveServerToolCallCancellation: Sendable {
   let serverToolCallCancellation: BidiGenerateContentToolCallCancellation
   let serverToolCallCancellation: BidiGenerateContentToolCallCancellation
-  /// A list of `functionId`s matching the `functionId` provided in a previous
-  /// ``LiveServerToolCall``, where only the provided `functionId`s should be cancelled.
+  /// A list of function ids matching the ``FunctionCallPart/functionId`` provided in a previous
+  /// ``LiveServerToolCall``, where only the provided ids should be cancelled.
   public var ids: [String]? { serverToolCallCancellation.ids }
   public var ids: [String]? { serverToolCallCancellation.ids }
 
 
   init(_ serverToolCallCancellation: BidiGenerateContentToolCallCancellation) {
   init(_ serverToolCallCancellation: BidiGenerateContentToolCallCancellation) {

+ 6 - 5
FirebaseAI/Sources/Types/Public/Live/LiveSession.swift

@@ -16,9 +16,10 @@ import Foundation
 
 
 /// A live WebSocket session, capable of streaming content to and from the model.
 /// A live WebSocket session, capable of streaming content to and from the model.
 ///
 ///
-/// Messages are streamed through ``responses``, and can be sent through either the dedicated
-/// realtime API function (such as ``sendAudioRealtime(audio:)`` or ``sendTextRealtime(text:)``), or
-/// through the incremental API (such as ``sendContent(_:turnComplete:)``).
+/// Messages are streamed through ``LiveSession/responses``, and can be sent through either the
+/// dedicated realtime API function (such as ``LiveSession/sendAudioRealtime(_:)`` and
+/// ``LiveSession/sendTextRealtime(_:)``), or through the incremental API (such as
+/// ``LiveSession/sendContent(_:turnComplete:)-6x3ae``).
 ///
 ///
 /// To create an instance of this class, see ``LiveGenerativeModel``.
 /// To create an instance of this class, see ``LiveGenerativeModel``.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
@@ -26,7 +27,7 @@ import Foundation
 public final class LiveSession: Sendable {
 public final class LiveSession: Sendable {
   private let service: LiveSessionService
   private let service: LiveSessionService
 
 
-  /// An asyncronous stream of messages from the server.
+  /// An asynchronous stream of messages from the server.
   ///
   ///
   ///   These messages are the incremental updates from the model, for the current conversation.
   ///   These messages are the incremental updates from the model, for the current conversation.
   public var responses: AsyncThrowingStream<LiveServerMessage, Error> { service.responses }
   public var responses: AsyncThrowingStream<LiveServerMessage, Error> { service.responses }
@@ -41,7 +42,7 @@ public final class LiveSession: Sendable {
   ///
   ///
   /// - Parameters:
   /// - Parameters:
   ///   - responses: Client generated function results, matched to their respective
   ///   - responses: Client generated function results, matched to their respective
-  ///     ``FunctionCallPart`` by the `functionId` field.
+  ///     ``FunctionCallPart`` by the ``FunctionCallPart/functionId`` field.
   public func sendFunctionResponses(_ responses: [FunctionResponsePart]) async {
   public func sendFunctionResponses(_ responses: [FunctionResponsePart]) async {
     let message = BidiGenerateContentToolResponse(
     let message = BidiGenerateContentToolResponse(
       functionResponses: responses.map { $0.functionResponse }
       functionResponses: responses.map { $0.functionResponse }

+ 8 - 4
FirebaseAI/Sources/Types/Public/Live/LiveSessionErrors.swift

@@ -20,7 +20,8 @@ import Foundation
 /// version, or that the model is just
 /// version, or that the model is just
 /// not supported.
 /// not supported.
 ///
 ///
-/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this.
+/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionUnsupportedMessageError/errorUserInfo``
+/// for the error that caused this.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public struct LiveSessionUnsupportedMessageError: Error, Sendable, CustomNSError {
 public struct LiveSessionUnsupportedMessageError: Error, Sendable, CustomNSError {
@@ -40,7 +41,8 @@ public struct LiveSessionUnsupportedMessageError: Error, Sendable, CustomNSError
 
 
 /// The live session was closed, because the network connection was lost.
 /// The live session was closed, because the network connection was lost.
 ///
 ///
-/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this.
+/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionLostConnectionError/errorUserInfo`` for
+/// the error that caused this.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public struct LiveSessionLostConnectionError: Error, Sendable, CustomNSError {
 public struct LiveSessionLostConnectionError: Error, Sendable, CustomNSError {
@@ -60,7 +62,8 @@ public struct LiveSessionLostConnectionError: Error, Sendable, CustomNSError {
 
 
 /// The live session was closed, but not for a reason the SDK expected.
 /// The live session was closed, but not for a reason the SDK expected.
 ///
 ///
-/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this.
+/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionUnexpectedClosureError/errorUserInfo``
+/// for the error that caused this.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public struct LiveSessionUnexpectedClosureError: Error, Sendable, CustomNSError {
 public struct LiveSessionUnexpectedClosureError: Error, Sendable, CustomNSError {
@@ -83,7 +86,8 @@ public struct LiveSessionUnexpectedClosureError: Error, Sendable, CustomNSError
 /// This can occur due to the model not supporting the requested response modalities, the project
 /// This can occur due to the model not supporting the requested response modalities, the project
 /// not having access to the model, the model being invalid,  or some internal error.
 /// not having access to the model, the model being invalid,  or some internal error.
 ///
 ///
-/// Check the `NSUnderlyingErrorKey` entry in ``errorUserInfo`` for the error that caused this.
+/// Check the `NSUnderlyingErrorKey` entry in ``LiveSessionSetupError/errorUserInfo`` for the error
+/// that caused this.
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, *)
 @available(watchOS, unavailable)
 @available(watchOS, unavailable)
 public struct LiveSessionSetupError: Error, Sendable, CustomNSError {
 public struct LiveSessionSetupError: Error, Sendable, CustomNSError {

+ 1 - 1
FirebaseAI/Sources/Types/Public/Live/SpeechConfig.swift

@@ -24,7 +24,7 @@ public struct SpeechConfig: Sendable {
     self.speechConfig = speechConfig
     self.speechConfig = speechConfig
   }
   }
 
 
-  /// Creates a new `LiveSpeechConfig` value.
+  /// Creates a new ``SpeechConfig`` value.
   ///
   ///
   /// - Parameters:
   /// - Parameters:
   ///   - voiceName: The name of the prebuilt voice to be used for the model's speech response.
   ///   - voiceName: The name of the prebuilt voice to be used for the model's speech response.

+ 4 - 3
FirebaseAI/Sources/Types/Public/Part.swift

@@ -173,7 +173,7 @@ public struct FunctionCallPart: Part {
   ///   - name: The name of the function to call.
   ///   - name: The name of the function to call.
   ///   - args: The function parameters and values.
   ///   - args: The function parameters and values.
   ///   - id: Unique id of the function call. If present, the returned ``FunctionResponsePart``
   ///   - id: Unique id of the function call. If present, the returned ``FunctionResponsePart``
-  ///     should have a matching `id` field.
+  ///     should have a matching ``FunctionResponsePart/functionId`` field.
   public init(name: String, args: JSONObject, id: String? = nil) {
   public init(name: String, args: JSONObject, id: String? = nil) {
     self.init(FunctionCall(name: name, args: args, id: id), isThought: nil, thoughtSignature: nil)
     self.init(FunctionCall(name: name, args: args, id: id), isThought: nil, thoughtSignature: nil)
   }
   }
@@ -196,7 +196,7 @@ public struct FunctionResponsePart: Part {
   let _isThought: Bool?
   let _isThought: Bool?
   let thoughtSignature: String?
   let thoughtSignature: String?
 
 
-  /// Matching `id` for a ``FunctionCallPart``, if one was provided.
+  /// Matching ``FunctionCallPart/functionId`` for a ``FunctionCallPart``, if one was provided.
   public var functionId: String? { functionResponse.id }
   public var functionId: String? { functionResponse.id }
 
 
   /// The name of the function that was called.
   /// The name of the function that was called.
@@ -223,7 +223,8 @@ public struct FunctionResponsePart: Part {
   /// - Parameters:
   /// - Parameters:
   ///   - name: The name of the function that was called.
   ///   - name: The name of the function that was called.
   ///   - response: The function's response.
   ///   - response: The function's response.
-  ///   - functionId: Matching `functionId` for a ``FunctionCallPart``, if one was provided.
+  ///   - functionId: Matching ``FunctionCallPart/functionId`` for a ``FunctionCallPart``, if one
+  ///     was provided.
   public init(name: String, response: JSONObject, functionId: String? = nil) {
   public init(name: String, response: JSONObject, functionId: String? = nil) {
     self.init(
     self.init(
       FunctionResponse(name: name, response: response, id: functionId),
       FunctionResponse(name: name, response: response, id: functionId),