// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import Foundation
  15. /// A type defining potentially harmful media categories and their model-assigned ratings. A value
  16. /// of this type may be assigned to a category for every model-generated response, not just
  17. /// responses that exceed a certain threshold.
  18. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  19. public struct SafetyRating: Equatable, Hashable, Sendable {
  20. /// The category describing the potential harm a piece of content may pose.
  21. ///
  22. /// See ``HarmCategory`` for a list of possible values.
  23. public let category: HarmCategory
  24. /// The model-generated probability that the content falls under the specified harm ``category``.
  25. ///
  26. /// See ``HarmProbability`` for a list of possible values.
  27. ///
  28. /// > Important: This does not indicate the severity of harm for a piece of content.
  29. public let probability: HarmProbability
  30. /// Initializes a new `SafetyRating` instance with the given category and probability.
  31. /// Use this initializer for SwiftUI previews or tests.
  32. public init(category: HarmCategory, probability: HarmProbability) {
  33. self.category = category
  34. self.probability = probability
  35. }
  36. /// The probability that a given model output falls under a harmful content category. This does
  37. /// not indicate the severity of harm for a piece of content.
  38. public enum HarmProbability: String, Sendable {
  39. /// Unknown. A new server value that isn't recognized by the SDK.
  40. case unknown = "UNKNOWN"
  41. /// The probability is zero or close to zero. For benign content, the probability across all
  42. /// categories will be this value.
  43. case negligible = "NEGLIGIBLE"
  44. /// The probability is small but non-zero.
  45. case low = "LOW"
  46. /// The probability is moderate.
  47. case medium = "MEDIUM"
  48. /// The probability is high. The content described is very likely harmful.
  49. case high = "HIGH"
  50. }
  51. }
  52. /// A type used to specify a threshold for harmful content, beyond which the model will return a
  53. /// fallback response instead of generated content.
  54. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  55. public struct SafetySetting {
  56. /// Block at and beyond a specified ``SafetyRating/HarmProbability``.
  57. public enum HarmBlockThreshold: String, Sendable {
  58. // Content with `.negligible` will be allowed.
  59. case blockLowAndAbove = "BLOCK_LOW_AND_ABOVE"
  60. /// Content with `.negligible` and `.low` will be allowed.
  61. case blockMediumAndAbove = "BLOCK_MEDIUM_AND_ABOVE"
  62. /// Content with `.negligible`, `.low`, and `.medium` will be allowed.
  63. case blockOnlyHigh = "BLOCK_ONLY_HIGH"
  64. /// All content will be allowed.
  65. case blockNone = "BLOCK_NONE"
  66. }
  67. enum CodingKeys: String, CodingKey {
  68. case harmCategory = "category"
  69. case threshold
  70. }
  71. /// The category this safety setting should be applied to.
  72. public let harmCategory: HarmCategory
  73. /// The threshold describing what content should be blocked.
  74. public let threshold: HarmBlockThreshold
  75. /// Initializes a new safety setting with the given category and threshold.
  76. public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
  77. self.harmCategory = harmCategory
  78. self.threshold = threshold
  79. }
  80. }
  81. /// Categories describing the potential harm a piece of content may pose.
  82. public enum HarmCategory: String, Sendable {
  83. /// Unknown. A new server value that isn't recognized by the SDK.
  84. case unknown = "HARM_CATEGORY_UNKNOWN"
  85. /// Harassment content.
  86. case harassment = "HARM_CATEGORY_HARASSMENT"
  87. /// Negative or harmful comments targeting identity and/or protected attributes.
  88. case hateSpeech = "HARM_CATEGORY_HATE_SPEECH"
  89. /// Contains references to sexual acts or other lewd content.
  90. case sexuallyExplicit = "HARM_CATEGORY_SEXUALLY_EXPLICIT"
  91. /// Promotes or enables access to harmful goods, services, or activities.
  92. case dangerousContent = "HARM_CATEGORY_DANGEROUS_CONTENT"
  93. }
// MARK: - Codable Conformances
  95. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  96. extension SafetyRating.HarmProbability: Decodable {
  97. public init(from decoder: Decoder) throws {
  98. let value = try decoder.singleValueContainer().decode(String.self)
  99. guard let decodedProbability = SafetyRating.HarmProbability(rawValue: value) else {
  100. VertexLog.error(
  101. code: .generateContentResponseUnrecognizedHarmProbability,
  102. "Unrecognized HarmProbability with value \"\(value)\"."
  103. )
  104. self = .unknown
  105. return
  106. }
  107. self = decodedProbability
  108. }
  109. }
// Synthesized `Decodable` conformance: `category` and `probability` each decode through their
// own custom `init(from:)`, which map unrecognized server values to `.unknown` instead of
// throwing.
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetyRating: Decodable {}
  112. @available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
  113. extension HarmCategory: Codable {
  114. public init(from decoder: Decoder) throws {
  115. let value = try decoder.singleValueContainer().decode(String.self)
  116. guard let decodedCategory = HarmCategory(rawValue: value) else {
  117. VertexLog.error(
  118. code: .generateContentResponseUnrecognizedHarmCategory,
  119. "Unrecognized HarmCategory with value \"\(value)\"."
  120. )
  121. self = .unknown
  122. return
  123. }
  124. self = decodedCategory
  125. }
  126. }
// Encodes as the raw wire-format string (e.g., "BLOCK_ONLY_HIGH").
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetySetting.HarmBlockThreshold: Encodable {}
// Synthesized `Encodable` conformance; `CodingKeys` renames `harmCategory` to "category" on
// the wire.
@available(iOS 15.0, macOS 11.0, macCatalyst 15.0, tvOS 15.0, watchOS 8.0, *)
extension SafetySetting: Encodable {}