// GenerationConfig.swift
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import Foundation
  15. /// A struct defining model parameters to be used when sending generative AI
  16. /// requests to the backend model.
  17. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, *)
  18. public struct GenerationConfig {
  19. /// A parameter controlling the degree of randomness in token selection. A
  20. /// temperature of zero is deterministic, always choosing the
  21. /// highest-probability response. Typical values are between 0 and 1
  22. /// inclusive. Defaults to 0 if unspecified.
  23. public let temperature: Float?
  24. /// The `topP` parameter changes how the model selects tokens for output.
  25. /// Tokens are selected from the most to least probable until the sum of
  26. /// their probabilities equals the `topP` value. For example, if tokens A, B,
  27. /// and C have probabilities of 0.3, 0.2, and 0.1 respectively and the topP
  28. /// value is 0.5, then the model will select either A or B as the next token
  29. /// by using the `temperature` and exclude C as a candidate.
  30. /// Defaults to 0.95 if unset.
  31. public let topP: Float?
  32. /// The `topK` parameter changes how the model selects tokens for output. A
  33. /// `topK` of 1 means the selected token is the most probable among all the
  34. /// tokens in the model's vocabulary, while a `topK` of 3 means that the next
  35. /// token is selected from among the 3 most probable using the `temperature`.
  36. /// For each token selection step, the `topK` tokens with the highest
  37. /// probabilities are sampled. Tokens are then further filtered based on
  38. /// `topP` with the final token selected using `temperature` sampling.
  39. /// Defaults to 40 if unspecified.
  40. public let topK: Int?
  41. /// The maximum number of generated response messages to return. This value
  42. /// must be between [1, 8], inclusive. If unset, this will default to 1.
  43. ///
  44. /// - Note: Only unique candidates are returned. Higher temperatures are more
  45. /// likely to produce unique candidates. Setting `temperature` to 0 will
  46. /// always produce exactly one candidate regardless of the
  47. /// `candidateCount`.
  48. public let candidateCount: Int?
  49. /// Specifies the maximum number of tokens that can be generated in the
  50. /// response. The number of tokens per word varies depending on the
  51. /// language outputted. The maximum value is capped at 1024. Defaults to 0
  52. /// (unbounded).
  53. public let maxOutputTokens: Int?
  54. /// A set of up to 5 `String`s that will stop output generation. If
  55. /// specified, the API will stop at the first appearance of a stop sequence.
  56. /// The stop sequence will not be included as part of the response.
  57. public let stopSequences: [String]?
  58. /// Output response MIME type of the generated candidate text.
  59. ///
  60. /// Supported MIME types:
  61. /// - `text/plain`: Text output; the default behavior if unspecified.
  62. /// - `application/json`: JSON response in the candidates.
  63. public let responseMIMEType: String?
  64. /// Creates a new `GenerationConfig` value.
  65. ///
  66. /// - Parameter temperature: See ``temperature``
  67. /// - Parameter topP: See ``topP``
  68. /// - Parameter topK: See ``topK``
  69. /// - Parameter candidateCount: See ``candidateCount``
  70. /// - Parameter maxOutputTokens: See ``maxOutputTokens``
  71. /// - Parameter stopSequences: See ``stopSequences``
  72. public init(temperature: Float? = nil, topP: Float? = nil, topK: Int? = nil,
  73. candidateCount: Int? = nil, maxOutputTokens: Int? = nil,
  74. stopSequences: [String]? = nil, responseMIMEType: String? = nil) {
  75. // Explicit init because otherwise if we re-arrange the above variables it changes the API
  76. // surface.
  77. self.temperature = temperature
  78. self.topP = topP
  79. self.topK = topK
  80. self.candidateCount = candidateCount
  81. self.maxOutputTokens = maxOutputTokens
  82. self.stopSequences = stopSequences
  83. self.responseMIMEType = responseMIMEType
  84. }
  85. }
  86. // MARK: - Codable Conformances
  87. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, *)
  88. extension GenerationConfig: Encodable {}