
[Firebase AI] Fix Google AI `useLimitedUseAppCheckTokens` config (#15423)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Andrew Heard, 5 months ago
parent commit d5b6dec4ce

+ 19 - 23
FirebaseAI/Tests/TestApp/Tests/Integration/LiveSessionTests.swift

@@ -61,20 +61,25 @@ struct LiveSessionTests {
     static let yesOrNo = ModelContent(
       role: "system",
       parts: """
-        You can only respond with "yes" or "no".
+      You can only respond with "yes" or "no".
       """.trimmingCharacters(in: .whitespacesAndNewlines)
     )
 
     static let helloGoodbye = ModelContent(
       role: "system",
       parts: """
-        When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is broken".
+      When you hear "Hello" say "Goodbye". If you hear anything else, say "The audio file is \
+      broken".
       """.trimmingCharacters(in: .whitespacesAndNewlines)
     )
 
     static let lastNames = ModelContent(
       role: "system",
-      parts: "When you receive a message, if the message is a single word, assume it's the first name of a person, and call the getLastName tool to get the last name of said person. Only respond with the last name."
+      parts: """
+      When you receive a message, if the message is a single word, assume it's the first name of a \
+      person, and call the getLastName tool to get the last name of said person. Only respond with \
+      the last name.
+      """.trimmingCharacters(in: .whitespacesAndNewlines)
     )
 
     static let animalInVideo = ModelContent(
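The prompt rewrites above lean on two Swift string-literal features: a trailing `\` inside a multi-line literal suppresses the line break, so a long prompt can wrap in source without embedding a newline, and `trimmingCharacters(in:)` strips any stray leading or trailing whitespace. A minimal standalone sketch of the same pattern (the prompt text here is illustrative):

```swift
import Foundation

// A trailing backslash inside a multi-line string literal suppresses the
// newline, so the rendered value is a single line even though the source
// wraps to stay within the line-length limit.
let prompt = """
When you receive a message, if the message is a single word, assume it's \
the first name of a person.
""".trimmingCharacters(in: .whitespacesAndNewlines)

assert(!prompt.contains("\n")) // one logical line, no embedded newline
```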
@@ -142,10 +147,9 @@ struct LiveSessionTests {
 
     let session = try await model.connect()
 
-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     // The model can't infer that we're done speaking until we send null bytes
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
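Replacing the `guard let` / `Issue.record` / `return` dance with Swift Testing's `try #require(_:_:)` is a nice tightening: the macro unwraps the optional, or records the failure and throws, ending the test without an explicit early return. A sketch of the before/after shape, with `loadAsset()` and `use(_:)` as hypothetical stand-ins:

```swift
import Foundation
import Testing

func loadAsset() -> Data? { Data("hello".utf8) } // hypothetical stand-in
func use(_ data: Data) {}                        // hypothetical stand-in

// Before: unwrap manually, record the failure, and bail out early.
@Test func manualUnwrap() throws {
  guard let asset = loadAsset() else {
    Issue.record("Missing asset")
    return
  }
  use(asset)
}

// After: #require unwraps, or records the failure and throws, so the
// happy path reads straight through.
@Test func requireUnwrap() throws {
  let asset = try #require(loadAsset(), "Missing asset")
  use(asset)
}
```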
@@ -171,10 +175,9 @@ struct LiveSessionTests {
 
     let session = try await model.connect()
 
-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
     await session.sendAudioRealtime(audioFile.data)
     await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
 
@@ -281,7 +284,7 @@ struct LiveSessionTests {
   }
 
   @Test(arguments: arguments.filter {
-    // TODO: (b/450982184) Remove when vertex adds support
+    // TODO: (b/450982184) Remove when Vertex AI adds support for Function IDs and Cancellation
     switch $0.0.apiConfig.service {
     case .googleAI:
       true
@@ -291,12 +294,6 @@ struct LiveSessionTests {
   })
   func realtime_functionCalling_cancellation(_ config: InstanceConfig,
                                              modelName: String) async throws {
-    // TODO: (b/450982184) Remove when vertex adds support
-    guard case .googleAI = config.apiConfig.service else {
-      Issue.record("Vertex does not currently support function ids or function cancellation.")
-      return
-    }
-
     let model = FirebaseAI.componentInstance(config).liveModel(
       modelName: modelName,
       generationConfig: textConfig,
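Moving the service check out of the test body and into the `arguments:` filter means unsupported configs never become test cases at all, rather than each one starting up only to record a message and return. A reduced sketch of the idea, with a hypothetical `Service` enum standing in for `APIConfig.service`:

```swift
import Testing

enum Service { case googleAI, vertexAI } // hypothetical stand-in

let allServices: [Service] = [.googleAI, .vertexAI]

// Only the supported services are enumerated as test cases; .vertexAI
// never appears in the run instead of showing up as a no-op test.
@Test(arguments: allServices.filter {
  switch $0 {
  case .googleAI: true
  case .vertexAI: false
  }
})
func supportedServicesOnly(_ service: Service) {
  #expect(service == .googleAI)
}
```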
@@ -337,17 +334,16 @@ struct LiveSessionTests {
       generationConfig: audioConfig
     )
 
-    guard let audioFile = NSDataAsset(name: "hello") else {
-      Issue.record("Missing audio file 'hello.wav' in Assets")
-      return
-    }
+    let audioFile = try #require(
+      NSDataAsset(name: "hello"), "Missing audio file 'hello.wav' in Assets"
+    )
 
     try await retry(times: 3, delayInSeconds: 2.0) {
       let session = try await model.connect()
       await session.sendAudioRealtime(audioFile.data)
       await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
 
-      // wait a second to allow the model to start generating (and cuase a proper interruption)
+      // Wait a second to allow the model to start generating (and cause a proper interruption)
       try await Task.sleep(nanoseconds: oneSecondInNanoseconds)
       await session.sendAudioRealtime(audioFile.data)
       await session.sendAudioRealtime(Data(repeating: 0, count: audioFile.data.count))
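The `retry(times:delayInSeconds:)` helper wrapping this block is defined elsewhere in the test app; a hypothetical sketch of what such an async helper typically looks like, not the app's actual implementation:

```swift
// Rerun the body until it succeeds or the attempt budget is exhausted,
// sleeping between attempts; rethrow the last error on final failure.
func retry(times: Int, delayInSeconds: Double,
           _ body: () async throws -> Void) async throws {
  var lastError: Error?
  for attempt in 1 ... max(times, 1) {
    do {
      try await body()
      return
    } catch {
      lastError = error
      if attempt < times {
        try await Task.sleep(nanoseconds: UInt64(delayInSeconds * 1_000_000_000))
      }
    }
  }
  if let lastError { throw lastError }
}
```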

+ 2 - 1
FirebaseAI/Tests/TestApp/Tests/Utilities/InstanceConfig.swift

@@ -56,6 +56,7 @@ struct InstanceConfig: Equatable, Encodable {
     apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
   )
   static let googleAI_v1beta_appCheckLimitedUse = InstanceConfig(
+    useLimitedUseAppCheckTokens: true,
     apiConfig: APIConfig(service: .googleAI(endpoint: .firebaseProxyProd), version: .v1beta)
   )
   static let googleAI_v1beta_staging = InstanceConfig(
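This one-line addition is the actual fix: the `googleAI_v1beta_appCheckLimitedUse` instance was named for limited-use tokens but never set the flag, so it exercised the standard token path. Limited-use tokens come from a different App Check API than the cached standard tokens, which is why the flag has to survive all the way to the fetch. A hypothetical sketch of the distinction, not the SDK's actual plumbing:

```swift
import FirebaseAppCheck

// Limited-use tokens are single-use and uncached (suited to replay-protected
// endpoints); standard tokens are cached and auto-refreshed by the SDK.
func fetchAppCheckToken(limitedUse: Bool) async throws -> AppCheckToken {
  let appCheck = AppCheck.appCheck()
  if limitedUse {
    return try await appCheck.limitedUseToken()
  } else {
    return try await appCheck.token(forcingRefresh: false)
  }
}
```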
@@ -164,7 +165,7 @@ extension InstanceConfig: CustomTestStringConvertible {
     }
     let locationSuffix: String
     if case let .vertexAI(_, location: location) = apiConfig.service {
-      locationSuffix = location
+      locationSuffix = " - (\(location))"
     } else {
       locationSuffix = ""
     }
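The `testDescription` fix at the bottom: previously the raw `location` string was appended with no separator, running straight into whatever preceded it; now the suffix carries its own formatting and stays an empty string for non-Vertex configs, so the call site needs no special casing. A small sketch of how it composes, with hypothetical values:

```swift
let serviceName = "Vertex AI"  // hypothetical
let location = "us-central1"   // hypothetical
let locationSuffix = " - (\(location))"

let testDescription = "\(serviceName)\(locationSuffix)"
// -> "Vertex AI - (us-central1)"; an empty suffix leaves the name untouched.
```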