// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import Foundation
/// A type defining potentially harmful media categories and their model-assigned ratings. A value
/// of this type may be assigned to a category for every model-generated response, not just
/// responses that exceed a certain threshold.
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
public struct SafetyRating: Equatable, Hashable, Sendable {
  /// The category describing the potential harm a piece of content may pose.
  ///
  /// See ``HarmCategory`` for a list of possible values.
  public let category: HarmCategory

  /// The model-generated probability that the content falls under the specified harm ``category``.
  ///
  /// See ``HarmProbability`` for a list of possible values. This is a discretized representation
  /// of the ``probabilityScore``.
  ///
  /// > Important: This does not indicate the severity of harm for a piece of content.
  public let probability: HarmProbability

  /// The confidence score that the response is associated with the corresponding harm ``category``.
  ///
  /// The probability safety score is a confidence score between 0.0 and 1.0, rounded to one decimal
  /// place; it is discretized into a ``HarmProbability`` in ``probability``. See [probability
  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
  /// in the Google Cloud documentation for more details.
  public let probabilityScore: Float

  /// The severity reflects the magnitude of how harmful a model response might be.
  ///
  /// See ``HarmSeverity`` for a list of possible values. This is a discretized representation of
  /// the ``severityScore``.
  public let severity: HarmSeverity

  /// The severity score is the magnitude of how harmful a model response might be.
  ///
  /// The severity score ranges from 0.0 to 1.0, rounded to one decimal place; it is discretized
  /// into a ``HarmSeverity`` in ``severity``. See [severity
  /// scores](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#comparison_of_probability_scores_and_severity_scores)
  /// in the Google Cloud documentation for more details.
  public let severityScore: Float

  /// If true, the response was blocked.
  public let blocked: Bool

  /// Initializes a new `SafetyRating` instance with the given category, probability and severity
  /// details.
  ///
  /// Use this initializer for SwiftUI previews or tests.
  ///
  /// - Parameters:
  ///   - category: The category describing the potential harm a piece of content may pose.
  ///   - probability: The model-generated probability that the content falls under the specified
  ///     harm category.
  ///   - probabilityScore: The confidence score, between 0.0 and 1.0, that the content falls
  ///     under the specified harm category.
  ///   - severity: The magnitude of how harmful the model response might be.
  ///   - severityScore: The severity score, between 0.0 and 1.0, of how harmful the model
  ///     response might be.
  ///   - blocked: If true, the response was blocked.
  public init(category: HarmCategory,
              probability: HarmProbability,
              probabilityScore: Float,
              severity: HarmSeverity,
              severityScore: Float,
              blocked: Bool) {
    self.category = category
    self.probability = probability
    self.probabilityScore = probabilityScore
    self.severity = severity
    self.severityScore = severityScore
    self.blocked = blocked
  }

  /// The probability that a given model output falls under a harmful content category.
  ///
  /// > Note: This does not indicate the severity of harm for a piece of content.
  @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  public struct HarmProbability: DecodableProtoEnum, Hashable, Sendable {
    // Raw string values as they appear in the backend's REST payload.
    enum Kind: String {
      case negligible = "NEGLIGIBLE"
      case low = "LOW"
      case medium = "MEDIUM"
      case high = "HIGH"
    }

    /// The probability is zero or close to zero.
    ///
    /// For benign content, the probability across all categories will be this value.
    public static let negligible = HarmProbability(kind: .negligible)

    /// The probability is small but non-zero.
    public static let low = HarmProbability(kind: .low)

    /// The probability is moderate.
    public static let medium = HarmProbability(kind: .medium)

    /// The probability is high.
    ///
    /// The content described is very likely harmful.
    public static let high = HarmProbability(kind: .high)

    /// Returns the raw string representation of the `HarmProbability` value.
    ///
    /// > Note: This value directly corresponds to the values in the [REST
    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
    public let rawValue: String

    // NOTE(review): presumably consumed by `DecodableProtoEnum` to log values not listed in
    // `Kind` — confirm against that protocol's declaration.
    static let unrecognizedValueMessageCode =
      AILog.MessageCode.generateContentResponseUnrecognizedHarmProbability
  }

  /// The magnitude of how harmful a model response might be for the respective ``HarmCategory``.
  @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  public struct HarmSeverity: DecodableProtoEnum, Hashable, Sendable {
    // Raw string values as they appear in the backend's REST payload.
    enum Kind: String {
      case negligible = "HARM_SEVERITY_NEGLIGIBLE"
      case low = "HARM_SEVERITY_LOW"
      case medium = "HARM_SEVERITY_MEDIUM"
      case high = "HARM_SEVERITY_HIGH"
    }

    /// Negligible level of harm severity.
    public static let negligible = HarmSeverity(kind: .negligible)

    /// Low level of harm severity.
    public static let low = HarmSeverity(kind: .low)

    /// Medium level of harm severity.
    public static let medium = HarmSeverity(kind: .medium)

    /// High level of harm severity.
    public static let high = HarmSeverity(kind: .high)

    /// Returns the raw string representation of the `HarmSeverity` value.
    ///
    /// > Note: This value directly corresponds to the values in the [REST
    /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#HarmSeverity).
    public let rawValue: String

    // NOTE(review): presumably consumed by `DecodableProtoEnum` to log values not listed in
    // `Kind` — confirm against that protocol's declaration.
    static let unrecognizedValueMessageCode =
      AILog.MessageCode.generateContentResponseUnrecognizedHarmSeverity
  }
}
/// A type used to specify a threshold for harmful content, beyond which the model will return a
/// fallback response instead of generated content.
///
/// See [safety settings for Gemini
/// models](https://firebase.google.com/docs/vertex-ai/safety-settings?platform=ios#gemini) for
/// more details.
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
public struct SafetySetting: Sendable {
  /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
  public struct HarmBlockThreshold: EncodableProtoEnum, Sendable {
    // Raw string values as they appear in the backend's REST payload.
    enum Kind: String {
      case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE"
      case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
      case blockOnlyHigh = "BLOCK_ONLY_HIGH"
      case blockNone = "BLOCK_NONE"
      case off = "OFF"
    }

    /// Content with `.negligible` will be allowed.
    public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)

    /// Content with `.negligible` and `.low` will be allowed.
    public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)

    /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
    public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)

    /// All content will be allowed.
    public static let blockNone = HarmBlockThreshold(kind: .blockNone)

    /// Turn off the safety filter.
    public static let off = HarmBlockThreshold(kind: .off)

    // The raw string sent to the backend for this threshold.
    let rawValue: String
  }

  /// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
  @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
    // Raw string values as they appear in the backend's REST payload.
    enum Kind: String {
      case severity = "SEVERITY"
      case probability = "PROBABILITY"
    }

    /// Use both probability and severity scores.
    public static let severity = HarmBlockMethod(kind: .severity)

    /// Use only the probability score.
    public static let probability = HarmBlockMethod(kind: .probability)

    // The raw string sent to the backend for this method.
    let rawValue: String
  }

  // Maps the `harmCategory` property to the "category" key in the encoded payload; the other
  // properties encode under their own names.
  enum CodingKeys: String, CodingKey {
    case harmCategory = "category"
    case threshold
    case method
  }

  /// The category this safety setting should be applied to.
  public let harmCategory: HarmCategory

  /// The threshold describing what content should be blocked.
  public let threshold: HarmBlockThreshold

  /// The method of computing whether the ``threshold`` has been exceeded.
  ///
  /// If `nil`, no method is sent and the backend applies its default (see the initializer docs).
  public let method: HarmBlockMethod?

  /// Initializes a new safety setting with the given category and threshold.
  ///
  /// - Parameters:
  ///   - harmCategory: The category this safety setting should be applied to.
  ///   - threshold: The threshold describing what content should be blocked.
  ///   - method: The method of computing whether the threshold has been exceeded; if not specified,
  ///     the default method is ``HarmBlockMethod/severity`` for most models. See [harm block
  ///     methods](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#how_to_configure_safety_filters)
  ///     in the Google Cloud documentation for more details.
  /// > Note: For models older than `gemini-1.5-flash` and `gemini-1.5-pro`, the default method
  /// > is ``HarmBlockMethod/probability``.
  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
              method: HarmBlockMethod? = nil) {
    self.harmCategory = harmCategory
    self.threshold = threshold
    self.method = method
  }
}
/// Categories describing the potential harm a piece of content may pose.
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
  // Raw string values as they appear in the backend's REST payload.
  enum Kind: String {
    case harassment = "HARM_CATEGORY_HARASSMENT"
    case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
    case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
    case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
    case civicIntegrity = "HARM_CATEGORY_CIVIC_INTEGRITY"
  }

  /// Harassment content.
  public static let harassment = HarmCategory(kind: .harassment)

  /// Negative or harmful comments targeting identity and/or protected attributes.
  public static let hateSpeech = HarmCategory(kind: .hateSpeech)

  /// Contains references to sexual acts or other lewd content.
  public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)

  /// Promotes or enables access to harmful goods, services, or activities.
  public static let dangerousContent = HarmCategory(kind: .dangerousContent)

  /// Content that may be used to harm civic integrity.
  public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)

  /// Returns the raw string representation of the `HarmCategory` value.
  ///
  /// > Note: This value directly corresponds to the values in the
  /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
  public let rawValue: String

  // NOTE(review): presumably consumed by the proto-enum decoding machinery to log values not
  // listed in `Kind` — confirm against `CodableProtoEnum`'s declaration.
  static let unrecognizedValueMessageCode =
    AILog.MessageCode.generateContentResponseUnrecognizedHarmCategory
}
  222. // MARK: - Codable Conformances
  223. @available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  224. extension SafetyRating: Decodable {
  225. enum CodingKeys: CodingKey {
  226. case category
  227. case probability
  228. case probabilityScore
  229. case severity
  230. case severityScore
  231. case blocked
  232. }
  233. public init(from decoder: any Decoder) throws {
  234. let container = try decoder.container(keyedBy: CodingKeys.self)
  235. category = try container.decode(HarmCategory.self, forKey: .category)
  236. probability = try container.decode(HarmProbability.self, forKey: .probability)
  237. // The following 3 fields are only omitted in our test data.
  238. probabilityScore = try container.decodeIfPresent(Float.self, forKey: .probabilityScore) ?? 0.0
  239. severity = try container.decodeIfPresent(HarmSeverity.self, forKey: .severity) ??
  240. HarmSeverity(rawValue: "HARM_SEVERITY_UNSPECIFIED")
  241. severityScore = try container.decodeIfPresent(Float.self, forKey: .severityScore) ?? 0.0
  242. // The blocked field is only included when true.
  243. blocked = try container.decodeIfPresent(Bool.self, forKey: .blocked) ?? false
  244. }
  245. }
// Empty conformance: the encoding implementation is not defined here — presumably supplied via
// `EncodableProtoEnum` using `rawValue`; confirm against that protocol's declaration.
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetySetting.HarmBlockThreshold: Encodable {}
// Empty conformance: `encode(to:)` is compiler-synthesized using `SafetySetting.CodingKeys`,
// which renames `harmCategory` to "category" in the encoded payload.
@available(iOS 15.0, macOS 12.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetySetting: Encodable {}