// ConversationViewModel.swift

// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import FirebaseVertexAI
import Foundation
import UIKit
  17. @MainActor
  18. class ConversationViewModel: ObservableObject {
  19. /// This array holds both the user's and the system's chat messages
  20. @Published var messages = [ChatMessage]()
  21. /// Indicates we're waiting for the model to finish
  22. @Published var busy = false
  23. @Published var error: Error?
  24. var hasError: Bool {
  25. return error != nil
  26. }
  27. private var model: GenerativeModel
  28. private var chat: Chat
  29. private var stopGenerating = false
  30. private var chatTask: Task<Void, Never>?
  31. init() {
  32. model = VertexAI.vertexAI().generativeModel(modelName: "gemini-1.5-flash")
  33. chat = model.startChat()
  34. }
  35. func sendMessage(_ text: String, streaming: Bool = true) async {
  36. error = nil
  37. if streaming {
  38. await internalSendMessageStreaming(text)
  39. } else {
  40. await internalSendMessage(text)
  41. }
  42. }
  43. func startNewChat() {
  44. stop()
  45. error = nil
  46. chat = model.startChat()
  47. messages.removeAll()
  48. }
  49. func stop() {
  50. chatTask?.cancel()
  51. error = nil
  52. }
  53. private func internalSendMessageStreaming(_ text: String) async {
  54. chatTask?.cancel()
  55. chatTask = Task {
  56. busy = true
  57. defer {
  58. busy = false
  59. }
  60. // first, add the user's message to the chat
  61. let userMessage = ChatMessage(message: text, participant: .user)
  62. messages.append(userMessage)
  63. // add a pending message while we're waiting for a response from the backend
  64. let systemMessage = ChatMessage.pending(participant: .system)
  65. messages.append(systemMessage)
  66. do {
  67. let responseStream = try chat.sendMessageStream(text)
  68. for try await chunk in responseStream {
  69. messages[messages.count - 1].pending = false
  70. if let text = chunk.text {
  71. messages[messages.count - 1].message += text
  72. }
  73. }
  74. } catch {
  75. self.error = error
  76. print(error.localizedDescription)
  77. messages.removeLast()
  78. }
  79. }
  80. }
  81. private func internalSendMessage(_ text: String) async {
  82. chatTask?.cancel()
  83. chatTask = Task {
  84. busy = true
  85. defer {
  86. busy = false
  87. }
  88. // first, add the user's message to the chat
  89. let userMessage = ChatMessage(message: text, participant: .user)
  90. messages.append(userMessage)
  91. // add a pending message while we're waiting for a response from the backend
  92. let systemMessage = ChatMessage.pending(participant: .system)
  93. messages.append(systemMessage)
  94. do {
  95. var response: GenerateContentResponse?
  96. response = try await chat.sendMessage(text)
  97. if let responseText = response?.text {
  98. // replace pending message with backend response
  99. messages[messages.count - 1].message = responseText
  100. messages[messages.count - 1].pending = false
  101. }
  102. } catch {
  103. self.error = error
  104. print(error.localizedDescription)
  105. messages.removeLast()
  106. }
  107. }
  108. }
  109. }