# GoogleCloudAiplatformV1SafetyRating
import type { GoogleCloudAiplatformV1SafetyRating } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
Safety rating corresponding to the generated content.
interface GoogleCloudAiplatformV1SafetyRating {
readonly blocked?: boolean;
readonly category?:
| "HARM_CATEGORY_UNSPECIFIED"
| "HARM_CATEGORY_HATE_SPEECH"
| "HARM_CATEGORY_DANGEROUS_CONTENT"
| "HARM_CATEGORY_HARASSMENT"
| "HARM_CATEGORY_SEXUALLY_EXPLICIT"
| "HARM_CATEGORY_CIVIC_INTEGRITY"
| "HARM_CATEGORY_IMAGE_HATE"
| "HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT"
| "HARM_CATEGORY_IMAGE_HARASSMENT"
| "HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT";
readonly overwrittenThreshold?:
| "HARM_BLOCK_THRESHOLD_UNSPECIFIED"
| "BLOCK_LOW_AND_ABOVE"
| "BLOCK_MEDIUM_AND_ABOVE"
| "BLOCK_ONLY_HIGH"
| "BLOCK_NONE"
| "OFF";
readonly probability?:
| "HARM_PROBABILITY_UNSPECIFIED"
| "NEGLIGIBLE"
| "LOW"
| "MEDIUM"
| "HIGH";
readonly probabilityScore?: number;
readonly severity?:
| "HARM_SEVERITY_UNSPECIFIED"
| "HARM_SEVERITY_NEGLIGIBLE"
| "HARM_SEVERITY_LOW"
| "HARM_SEVERITY_MEDIUM"
| "HARM_SEVERITY_HIGH";
readonly severityScore?: number;
}§Properties
## Properties
readonly blocked?: boolean
Output only. Indicates whether the content was filtered out because of this rating.
---
readonly category?: "HARM_CATEGORY_UNSPECIFIED" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_DANGEROUS_CONTENT" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_CIVIC_INTEGRITY" | "HARM_CATEGORY_IMAGE_HATE" | "HARM_CATEGORY_IMAGE_DANGEROUS_CONTENT" | "HARM_CATEGORY_IMAGE_HARASSMENT" | "HARM_CATEGORY_IMAGE_SEXUALLY_EXPLICIT"
Output only. Harm category.
---
readonly overwrittenThreshold?: "HARM_BLOCK_THRESHOLD_UNSPECIFIED" | "BLOCK_LOW_AND_ABOVE" | "BLOCK_MEDIUM_AND_ABOVE" | "BLOCK_ONLY_HIGH" | "BLOCK_NONE" | "OFF"
Output only. The overwritten threshold for the safety category of Gemini 2.0 image out. If minors are detected in the output image, the threshold of each safety category will be overwritten if user sets a lower threshold.
---
readonly probability?: "HARM_PROBABILITY_UNSPECIFIED" | "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH"
Output only. Harm probability levels in the content.