// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. import Foundation
  15. /// A type defining potentially harmful media categories and their model-assigned ratings. A value
  16. /// of this type may be assigned to a category for every model-generated response, not just
  17. /// responses that exceed a certain threshold.
  18. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  19. public struct SafetyRating: Equatable, Hashable, Sendable {
  20. /// The category describing the potential harm a piece of content may pose.
  21. ///
  22. /// See ``HarmCategory`` for a list of possible values.
  23. public let category: HarmCategory
  24. /// The model-generated probability that the content falls under the specified harm ``category``.
  25. ///
  26. /// See ``HarmProbability`` for a list of possible values.
  27. ///
  28. /// > Important: This does not indicate the severity of harm for a piece of content.
  29. public let probability: HarmProbability
  30. /// Initializes a new `SafetyRating` instance with the given category and probability.
  31. /// Use this initializer for SwiftUI previews or tests.
  32. public init(category: HarmCategory, probability: HarmProbability) {
  33. self.category = category
  34. self.probability = probability
  35. }
  36. /// The probability that a given model output falls under a harmful content category.
  37. ///
  38. /// > Note: This does not indicate the severity of harm for a piece of content.
  39. public struct HarmProbability: DecodableProtoEnum, Hashable, Sendable {
  40. enum Kind: String {
  41. case negligible = "NEGLIGIBLE"
  42. case low = "LOW"
  43. case medium = "MEDIUM"
  44. case high = "HIGH"
  45. }
  46. /// The probability is zero or close to zero.
  47. ///
  48. /// For benign content, the probability across all categories will be this value.
  49. public static let negligible = HarmProbability(kind: .negligible)
  50. /// The probability is small but non-zero.
  51. public static let low = HarmProbability(kind: .low)
  52. /// The probability is moderate.
  53. public static let medium = HarmProbability(kind: .medium)
  54. /// The probability is high.
  55. ///
  56. /// The content described is very likely harmful.
  57. public static let high = HarmProbability(kind: .high)
  58. /// Returns the raw string representation of the `HarmProbability` value.
  59. ///
  60. /// > Note: This value directly corresponds to the values in the [REST
  61. /// > API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/GenerateContentResponse#SafetyRating).
  62. public let rawValue: String
  63. static let unrecognizedValueMessageCode =
  64. VertexLog.MessageCode.generateContentResponseUnrecognizedHarmProbability
  65. }
  66. }
  67. /// A type used to specify a threshold for harmful content, beyond which the model will return a
  68. /// fallback response instead of generated content.
  69. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  70. public struct SafetySetting {
  71. /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
  72. public struct HarmBlockThreshold: EncodableProtoEnum, Sendable {
  73. enum Kind: String {
  74. case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE"
  75. case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
  76. case blockOnlyHigh = "BLOCK_ONLY_HIGH"
  77. case blockNone = "BLOCK_NONE"
  78. case off = "OFF"
  79. }
  80. /// Content with `.negligible` will be allowed.
  81. public static let blockLowAndAbove = HarmBlockThreshold(kind: .blockLowAndAbove)
  82. /// Content with `.negligible` and `.low` will be allowed.
  83. public static let blockMediumAndAbove = HarmBlockThreshold(kind: .blockMediumAndAbove)
  84. /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
  85. public static let blockOnlyHigh = HarmBlockThreshold(kind: .blockOnlyHigh)
  86. /// All content will be allowed.
  87. public static let blockNone = HarmBlockThreshold(kind: .blockNone)
  88. /// Turn off the safety filter.
  89. public static let off = HarmBlockThreshold(kind: .off)
  90. let rawValue: String
  91. }
  92. enum CodingKeys: String, CodingKey {
  93. case harmCategory = "category"
  94. case threshold
  95. }
  96. /// The category this safety setting should be applied to.
  97. public let harmCategory: HarmCategory
  98. /// The threshold describing what content should be blocked.
  99. public let threshold: HarmBlockThreshold
  100. /// Initializes a new safety setting with the given category and threshold.
  101. public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
  102. self.harmCategory = harmCategory
  103. self.threshold = threshold
  104. }
  105. }
  106. /// Categories describing the potential harm a piece of content may pose.
  107. public struct HarmCategory: CodableProtoEnum, Hashable, Sendable {
  108. enum Kind: String {
  109. case harassment = "HARM_CATEGORY_HARASSMENT"
  110. case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
  111. case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
  112. case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
  113. case civicIntegrity = "HARM_CATEGORY_CIVIC_INTEGRITY"
  114. }
  115. /// Harassment content.
  116. public static let harassment = HarmCategory(kind: .harassment)
  117. /// Negative or harmful comments targeting identity and/or protected attributes.
  118. public static let hateSpeech = HarmCategory(kind: .hateSpeech)
  119. /// Contains references to sexual acts or other lewd content.
  120. public static let sexuallyExplicit = HarmCategory(kind: .sexuallyExplicit)
  121. /// Promotes or enables access to harmful goods, services, or activities.
  122. public static let dangerousContent = HarmCategory(kind: .dangerousContent)
  123. /// Content that may be used to harm civic integrity.
  124. public static let civicIntegrity = HarmCategory(kind: .civicIntegrity)
  125. /// Returns the raw string representation of the `HarmCategory` value.
  126. ///
  127. /// > Note: This value directly corresponds to the values in the
  128. /// > [REST API](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/HarmCategory).
  129. public let rawValue: String
  130. static let unrecognizedValueMessageCode =
  131. VertexLog.MessageCode.generateContentResponseUnrecognizedHarmCategory
  132. }
  133. // MARK: - Codable Conformances
  134. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  135. extension SafetyRating: Decodable {}
  136. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  137. extension SafetySetting.HarmBlockThreshold: Encodable {}
  138. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  139. extension SafetySetting: Encodable {}