// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. import Foundation
  15. /// A type defining potentially harmful media categories and their model-assigned ratings. A value
  16. /// of this type may be assigned to a category for every model-generated response, not just
  17. /// responses that exceed a certain threshold.
  18. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  19. public struct SafetyRating: Equatable, Hashable, Sendable {
  20. /// The category describing the potential harm a piece of content may pose.
  21. ///
  22. /// See ``HarmCategory`` for a list of possible values.
  23. public let category: HarmCategory
  24. /// The model-generated probability that the content falls under the specified harm ``category``.
  25. ///
  26. /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
  27. /// of the ``probabilityScore``.
  28. ///
  29. /// > Important: This does not indicate the severity of harm for a piece of content.
  30. public let probability: HarmProbability
  31. /// The confidence score that the response is associated with the corresponding harm ``category``.
  32. ///
  33. /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
  34. /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
  35. /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
  36. /// in the Google Cloud documentation for more details.
  37. public let probabilityScore: Float
  38. /// The severity reflects the magnitude of how harmful a model response might be.
  39. ///
  40. /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
  41. /// the ``severityScore``.
  42. public let severity: HarmSeverity
  43. /// The severity score is the magnitude of how harmful a model response might be.
  44. ///
  45. /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
  46. /// into a ``HarmSeverity`` in ``severity``. See [severity scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
  47. /// in the Google Cloud documentation for more details.
  48. public let severityScore: Float
  49. /// If true, the response was blocked.
  50. public let blocked: Bool
  51. /// Initializes a new `SafetyRating` instance with the given category and probability.
  52. /// Use this initializer for SwiftUI previews or tests.
  53. public init(category: HarmCategory,
  54. probability: HarmProbability,
  55. probabilityScore: Float,
  56. severity: HarmSeverity,
  57. severityScore: Float,
  58. blocked: Bool) {
  59. self.category = category
  60. self.probability = probability
  61. self.probabilityScore = probabilityScore
  62. self.severity = severity
  63. self.severityScore = severityScore
  64. self.blocked = blocked
  65. }
  66. /// The probability that a given model output falls under a harmful content category.
  67. ///
  68. /// > Note: This does not indicate the severity of harm for a piece of content.
  69. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  70. public struct HarmProbability: DecodableProtoEnum, Hashable, Sendable {
  71. enum Kind: String {
  72. case unspecified = "HARM_PROBABILITY_UNSPECIFIED"
  73. case negligible = "NEGLIGIBLE"
  74. case low = "LOW"
  75. case medium = "MEDIUM"
  76. case high = "HIGH"
  77. }
  78. /// Internal-only; harm probability is unknown or unspecified by the backend.
  79. static let unspecified = HarmProbability(kind: .unspecified)
  80. /// The probability is zero or close to zero.
  81. ///
  82. /// For benign content, the probability across all categories will be this value.
  83. public static let negligible = HarmProbability(kind: .negligible)
  84. /// The probability is small but non-zero.
  85. public static let low = HarmProbability(kind: .low)
  86. /// The probability is moderate.
  87. public static let medium = HarmProbability(kind: .medium)
  88. /// The probability is high.
  89. ///
  90. /// The content described is very likely harmful.
  91. public static let high = HarmProbability(kind: .high)
  92. /// Returns the raw string representation of the `HarmProbability` value.
  93. ///
  94. /// > Note: This value directly corresponds to the values in the [REST
  95. /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
  96. public let rawValue: String
  97. static let unrecognizedValueMessageCode =
  98. AILog.MessageCode.generateContentResponseUnrecognizedHarmProbability
  99. }
  100. /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
  101. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  102. public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
  103. enum Kind: String {
  104. case unspecified = "HARM_SEVERITY_UNSPECIFIED"
  105. case negligible = "HARM_SEVERITY_NEGLIGIBLE"
  106. case low = "HARM_SEVERITY_LOW"
  107. case medium = "HARM_SEVERITY_MEDIUM"
  108. case high = "HARM_SEVERITY_HIGH"
  109. }
  110. /// Internal-only; harm severity is unknown or unspecified by the backend.
  111. static let unspecified: HarmSeverity = .init(kind: .unspecified)
  112. /// Negligible level of harm severity.
  113. public static let negligible = HarmSeverity(kind: .negligible)
  114. /// Low level of harm severity.
  115. public static let low = HarmSeverity(kind: .low)
  116. /// Medium level of harm severity.
  117. public static let medium = HarmSeverity(kind: .medium)
  118. /// High level of harm severity.
  119. public static let high = HarmSeverity(kind: .high)
  120. /// Returns the raw string representation of the `HarmSeverity` value.
  121. ///
  122. /// > Note: This value directly corresponds to the values in the [REST
  123. /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#HarmSeverity).
  124. public let rawValue: String
  125. static let unrecognizedValueMessageCode =
  126. AILog.MessageCode.generateContentResponseUnrecognizedHarmSeverity
  127. }
  128. }
  129. /// A type used to specify a threshold for harmful content, beyond which the model will return a
  130. /// fallback response instead of generated content.
  131. ///
  132. /// See [safety settings for Gemini
  133. /// models](https://firebase.google.com/docs/vertex-ai/safety-settings?platform=ios#gemini) for
  134. /// more details.
  135. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  136. public struct SafetySetting: Sendable {
  137. /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
  138. public struct HarmBlockThreshold: EncodableProtoEnum, Sendable {
  139. enum Kind: String {
  140. case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE"
  141. case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
  142. case blockOnlyHigh = "BLOCK_ONLY_HIGH"
  143. case blockNone = "BLOCK_NONE"
  144. case off = "OFF"
  145. }
  146. /// Content with `.negligible` will be allowed.
  147. public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)
  148. /// Content with `.negligible` and `.low` will be allowed.
  149. public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)
  150. /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
  151. public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)
  152. /// All content will be allowed.
  153. public static let blockNone = HarmBlockThreshold(kind: .blockNone)
  154. /// Turn off the safety filter.
  155. public static let off = HarmBlockThreshold(kind: .off)
  156. let rawValue: String
  157. }
  158. /// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
  159. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  160. public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
  161. enum Kind: String {
  162. case severity = "SEVERITY"
  163. case probability = "PROBABILITY"
  164. }
  165. /// Use both probability and severity scores.
  166. public static let severity = HarmBlockMethod(kind: .severity)
  167. /// Use only the probability score.
  168. public static let probability = HarmBlockMethod(kind: .probability)
  169. let rawValue: String
  170. }
  171. enum CodingKeys: String, CodingKey {
  172. case harmCategory = "category"
  173. case threshold
  174. case method
  175. }
  176. /// The category this safety setting should be applied to.
  177. public let harmCategory: HarmCategory
  178. /// The threshold describing what content should be blocked.
  179. public let threshold: HarmBlockThreshold
  180. /// The method of computing whether the ``threshold`` has been exceeded.
  181. public let method: HarmBlockMethod?
  182. /// Initializes a new safety setting with the given category and threshold.
  183. ///
  184. /// - Parameters:
  185. /// - harmCategory: The category this safety setting should be applied to.
  186. /// - threshold: The threshold describing what content should be blocked.
  187. /// - method: The method of computing whether the threshold has been exceeded; if not specified,
  188. /// the default method is ``HarmBlockMethod/severity`` for most models. See [harm block
  189. /// methods](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#how_to_configure_safety_filters)
  190. /// in the Google Cloud documentation for more details.
  191. /// > Note: For models older than `gemini-1.5-flash` and `gemini-1.5-pro`, the default method
  192. /// > is ``HarmBlockMethod/probability``.
  193. public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
  194. method: HarmBlockMethod? = nil) {
  195. self.harmCategory = harmCategory
  196. self.threshold = threshold
  197. self.method = method
  198. }
  199. }
  200. /// Categories describing the potential harm a piece of content may pose.
  201. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  202. public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
  203. enum Kind: String {
  204. case unspecified = "HARM_CATEGORY_UNSPECIFIED"
  205. case harassment = "HARM_CATEGORY_HARASSMENT"
  206. case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
  207. case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
  208. case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
  209. case civicIntegrity = "HARM_CATEGORY_CIVIC_INTEGRITY"
  210. }
  211. /// Internal-only; harm category is unknown or unspecified by the backend.
  212. static let unspecified = HarmCategory(kind: .unspecified)
  213. /// Harassment content.
  214. public static let harassment = HarmCategory(kind: .harassment)
  215. /// Negative or harmful comments targeting identity and/or protected attributes.
  216. public static let hateSpeech = HarmCategory(kind: .hateSpeech)
  217. /// Contains references to sexual acts or other lewd content.
  218. public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)
  219. /// Promotes or enables access to harmful goods, services, or activities.
  220. public static let dangerousContent = HarmCategory(kind: .dangerousContent)
  221. /// Content that may be used to harm civic integrity.
  222. public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)
  223. /// Returns the raw string representation of the `HarmCategory` value.
  224. ///
  225. /// > Note: This value directly corresponds to the values in the
  226. /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
  227. public let rawValue: String
  228. static let unrecognizedValueMessageCode =
  229. AILog.MessageCode.generateContentResponseUnrecognizedHarmCategory
  230. }
  231. // MARK: - Codable Conformances
  232. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  233. extension SafetyRating: Decodable {
  234. enum CodingKeys: CodingKey {
  235. case category
  236. case probability
  237. case probabilityScore
  238. case severity
  239. case severityScore
  240. case blocked
  241. }
  242. public init(from decoder: any Decoder) throws {
  243. let container = try decoder.container(keyedBy: CodingKeys.self)
  244. category = try container.decodeIfPresent(HarmCategory.self, forKey: .category) ?? .unspecified
  245. probability = try container.decodeIfPresent(
  246. HarmProbability.self, forKey: .probability
  247. ) ?? .unspecified
  248. // The following 3 fields are only provided when using the Vertex AI backend (not Google AI).
  249. probabilityScore = try container.decodeIfPresent(Float.self, forKey: .probabilityScore) ?? 0.0
  250. severity = try container.decodeIfPresent(HarmSeverity.self, forKey: .severity) ?? .unspecified
  251. severityScore = try container.decodeIfPresent(Float.self, forKey: .severityScore) ?? 0.0
  252. // The blocked field is only included when true.
  253. blocked = try container.decodeIfPresent(Bool.self, forKey: .blocked) ?? false
  254. }
  255. }
  256. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  257. extension SafetySetting.HarmBlockThreshold: Encodable {}
  258. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  259. extension SafetySetting: Encodable {}