GoogleCloudAiplatformV1SafetyRating
import type { GoogleCloudAiplatformV1SafetyRating } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
Safety rating corresponding to the generated content.
interface GoogleCloudAiplatformV1SafetyRating {
readonly blocked?: boolean;
readonly category?:
| "HARM_CATEGORY_UNSPECIFIED"
| "HARM_CATEGORY_HATE_SPEECH"
| "HARM_CATEGORY_DANGEROUS_CONTENT"
| "HARM_CATEGORY_HARASSMENT"
| "HARM_CATEGORY_SEXUALLY_EXPLICIT";
readonly probability?:
| "HARM_PROBABILITY_UNSPECIFIED"
| "NEGLIGIBLE"
| "LOW"
| "MEDIUM"
| "HIGH";
readonly probabilityScore?: number;
readonly severity?:
| "HARM_SEVERITY_UNSPECIFIED"
| "HARM_SEVERITY_NEGLIGIBLE"
| "HARM_SEVERITY_LOW"
| "HARM_SEVERITY_MEDIUM"
| "HARM_SEVERITY_HIGH";
readonly severityScore?: number;
}§Properties
§
readonly blocked?: boolean
Output only. Indicates whether the content was filtered out because of this rating.
§
readonly category?: "HARM_CATEGORY_UNSPECIFIED" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_DANGEROUS_CONTENT" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_SEXUALLY_EXPLICIT"
Output only. Harm category.
§
readonly probability?: "HARM_PROBABILITY_UNSPECIFIED" | "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH"
Output only. Harm probability levels in the content.