Source code for genai.schema._api

# generated by datamodel-codegen:
#   filename:  2024-05-22_openapi_schema

from __future__ import annotations

import warnings
from datetime import date
from enum import Enum
from typing import Any, Literal, Optional, Union

from pydantic import AwareDatetime, Field, RootModel, field_validator

from genai._types import ApiBaseModel


class ApiKeyResult(ApiBaseModel):
    created_at: AwareDatetime
    generated_at: AwareDatetime
    last_used_at: Optional[AwareDatetime] = None
    value: str


class BaseErrorExtension(ApiBaseModel):
    code: str
    state: Optional[dict[str, Any]] = None


class BaseErrorResponse(ApiBaseModel):
    error: str
    extensions: BaseErrorExtension
    message: str
    status_code: int


class BaseMessage(ApiBaseModel):
    content: str
    files: Optional[list[MessageFile]] = None
    role: ChatRole


class BaseTokens(ApiBaseModel):
    logprob: Optional[Union[float, str]] = None
    rank: Optional[int] = None
    text: Optional[str] = None
    top_tokens: Optional[list[GeneratedToken]] = None


class ChatRole(str, Enum):
    USER = "user"
    SYSTEM = "system"
    ASSISTANT = "assistant"
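

# Illustrative sketch (not part of the generated schema): assembling a chat
# payload from BaseMessage and ChatRole. The message texts are made up.
def _example_chat_messages() -> list[BaseMessage]:
    """Build a minimal system + user message pair for a chat request body."""
    return [
        BaseMessage(role=ChatRole.SYSTEM, content="You are a concise assistant."),
        BaseMessage(role=ChatRole.USER, content="Summarize the release notes."),
    ]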


class ConcurrencyLimit(ApiBaseModel):
    limit: int
    remaining: int


class DecodingMethod(str, Enum):
    GREEDY = "greedy"
    SAMPLE = "sample"


class DeploymentResult(ApiBaseModel):
    created_at: AwareDatetime
    deployed_at: Optional[AwareDatetime] = None
    expires_at: Optional[AwareDatetime] = None
    id: str
    status: DeploymentStatus
    tune_id: Optional[str] = None
    updated_at: AwareDatetime


class DeploymentStatus(str, Enum):
    QUEUED = "queued"
    INITIALIZING = "initializing"
    READY = "ready"
    FAILED = "failed"
    EXPIRED = "expired"


class EvaluationExperiment(ApiBaseModel):
    created_at: AwareDatetime
    description: Optional[str] = None
    evaluations_count: float
    file: Optional[EvaluationFile] = None
    id: str
    name: str
    task: EvaluationTask
    template_id: str


class EvaluationExperimentSortBy(str, Enum):
    NAME = "name"
    ID = "id"
    TEMPLATE = "template"


class EvaluationFieldOperation(ApiBaseModel):
    type: EvaluationOperationType


class EvaluationFile(ApiBaseModel):
    bytes: int
    created_at: AwareDatetime
    file_name: str
    id: str
    metadata: Optional[FileMetadata] = None


class EvaluationInstanceResult(ApiBaseModel):
    generation_info: Optional[dict[str, Any]] = Field(None, title="Generation Info")
    score: dict[str, Any]


class EvaluationLimit(ApiBaseModel):
    concurrency: ConcurrencyLimit


class EvaluationOperationType(str, Enum):
    RENAME_FIELDS = "rename_fields"
    ADD_FIELDS = "add_fields"
    SHUFFLE = "shuffle"


class EvaluationParentTask(ApiBaseModel):
    id: str
    name: str


class EvaluationPrompt(ApiBaseModel):
    id: str
    input: Optional[str] = None
    metadata: Optional[dict[str, Any]] = None
    name: str


class EvaluationResult(ApiBaseModel):
    created_at: AwareDatetime
    description: Optional[str] = None
    experiment_id: str
    file: Optional[EvaluationFile] = None
    finished_at: Optional[AwareDatetime] = None
    id: str
    model_id: str
    name: str
    parameters: Optional[dict[str, Any]] = None
    prompt: EvaluationPrompt
    result: dict[str, Any]
    started_at: Optional[AwareDatetime] = None
    status: EvaluationStatus
    status_message: Optional[str] = None
    task_id: str
    template_id: str
    template_name: str


class EvaluationSortBy(str, Enum):
    NAME = "name"
    ID = "id"
    TEMPLATE = "template"


class EvaluationStatus(str, Enum):
    PENDING = "pending"
    QUEUED = "queued"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"


class EvaluationTask(ApiBaseModel):
    id: str
    name: str
    parent_task: Optional[EvaluationParentTask] = None


class EvaluationTemplate(ApiBaseModel):
    dataset_fields: dict[str, Any]
    default_prompt_ids: Optional[list[str]] = None
    id: str
    metrics: list[Union[list[Metric], Metrics]]
    name: str
    postprocessors: list[str]
    task: Optional[EvaluationTask] = None


class Extensions(BaseErrorExtension):
    code: Literal["INVALID_INPUT"] = "INVALID_INPUT"


class Extensions1(BaseErrorExtension):
    code: Literal["INTERNAL_SERVER_ERROR"] = "INTERNAL_SERVER_ERROR"


class Extensions2(BaseErrorExtension):
    code: Literal["NOT_FOUND"] = "NOT_FOUND"


class Extensions3(BaseErrorExtension):
    code: Literal["TOO_MANY_REQUESTS"] = "TOO_MANY_REQUESTS"


class Extensions4(BaseErrorExtension):
    code: Literal["AUTH_ERROR"] = "AUTH_ERROR"


class Extensions5(BaseErrorExtension):
    code: Literal["SERVICE_UNAVAILABLE"] = "SERVICE_UNAVAILABLE"


class FileDescendant(ApiBaseModel):
    id: str


class FileFormat(ApiBaseModel):
    id: int
    name: str


class FileListSortBy(str, Enum):
    NAME = "name"
    CREATED_AT = "created_at"


class FileMetadata(ApiBaseModel):
    stats: FileMetadataStats


class FileMetadataStats(ApiBaseModel):
    columns: list[str]
    records_count: float


class FilePurpose(str, Enum):
    TUNE = "tune"
    TEMPLATE = "template"
    TUNE_IMPORT = "tune_import"
    EXTRACTION = "extraction"
    GENERIC_STRUCTURED = "generic_structured"


class FileResult(ApiBaseModel):
    bytes: int
    created_at: AwareDatetime
    descendants: Optional[list[FileDescendant]] = None
    file_formats: Optional[list[FileFormat]] = None
    file_name: str
    id: str
    metadata: Optional[FileMetadata] = None
    origin: Optional[FileDescendant] = None
    purpose: FilePurpose
    storage_provider_location: StorageProviderLocation
    updated_at: AwareDatetime


class FolderResult(ApiBaseModel):
    created_at: AwareDatetime
    id: str
    name: str
    prompt_ids: Optional[list[str]] = None


class GeneratedToken(ApiBaseModel):
    logprob: Optional[Union[float, str]] = None
    text: Optional[str] = None


class HAPOptions(ApiBaseModel):
    send_tokens: Optional[bool] = False
    threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0)


class Input(RootModel[list[Any]]):
    root: list[Any] = Field(..., max_length=20)


class InternalServerErrorResponse(BaseErrorResponse):
    extensions: Extensions1
    status_code: Literal[500] = 500


class LengthPenalty(ApiBaseModel):
    decay_factor: Optional[float] = Field(None, gt=1.0, title="Decay factor")
    """
    Represents the factor of exponential decay and must be > 1.0. Larger values correspond to more aggressive decay.
    """
    start_index: Optional[int] = Field(None, ge=1, title="Start index")
    """
    A number of generated tokens after which this should take effect.
    """


class MessageFile(ApiBaseModel):
    content: Optional[str] = None
    id: Optional[str] = None


class Metric(ApiBaseModel):
    description: str
    id: str
    name: str


class Metrics(ApiBaseModel):
    pass


class ModelFacet(ApiBaseModel):
    id: str
    name: str
    type: ModelFacetType


class ModelFacetType(str, Enum):
    LANGUAGE = "language"
    INDUSTRY = "industry"
    MODEL_TYPE = "model_type"


class ModelFamily(ApiBaseModel):
    description: Optional[str] = None
    id: int
    name: str
    prompt_example: Optional[str] = None
    short_description: Optional[str] = None
    system_prompt: Optional[str] = None


class ModelIdRetrieveResult(ApiBaseModel):
    description: Optional[str] = None
    developer: Optional[str] = None
    disabled: bool
    facets: Optional[list[ModelFacet]] = None
    id: str
    is_live: bool
    label: str
    model_family: ModelFamily
    name: str
    preferred: bool
    prompt_builder_example: Optional[dict[str, Any]] = None
    size: str
    source_model_id: Optional[str] = None
    system_prompt: Optional[str] = None
    system_prompt_id: Optional[int] = None
    tags: list[str]
    tasks: list[Tasks]
    token_limits: list[ModelTokenLimits]
    warning: Optional[str] = None


class ModelRetrieveResults(ApiBaseModel):
    facets: Optional[list[ModelFacet]] = None
    id: str
    is_live: bool
    label: str
    name: str
    size: str
    source_model_id: Optional[str] = None
    task_ids: list[str]
    token_limits: list[ModelTokenLimits]
    warning: Optional[str] = None


class ModelTokenLimits(ApiBaseModel):
    beam_width: int
    token_limit: int


class ModelType(str, Enum):
    MODEL = "model"
    TUNE = "tune"


class ModerationHAP(ApiBaseModel):
    input: Optional[ModerationHAPInput] = None
    output: Optional[ModerationHAPOutput] = None


class ModerationHAPInput(ApiBaseModel):
    enabled: Optional[bool] = False
    """
    Detects HAP (hateful, abusive, or profane language).
    """
    send_tokens: Optional[bool] = False
    threshold: Optional[float] = Field(0.75, ge=0.01, le=0.99, multiple_of=0.01)
    """
    The higher the number, the more confidence that the sentence contains HAP. The threshold allows you to modify how much confidence is needed for the sentence to be flagged as containing HAP.
    """


class ModerationHAPOutput(ApiBaseModel):
    enabled: Optional[bool] = False
    """
    Detects HAP (hateful, abusive, or profane language).
    """
    send_tokens: Optional[bool] = False
    threshold: Optional[float] = Field(0.75, ge=0.01, le=0.99, multiple_of=0.01)
    """
    The higher the number, the more confidence that the sentence contains HAP. The threshold allows you to modify how much confidence is needed for the sentence to be flagged as containing HAP.
    """


class ModerationImplicitHate(ApiBaseModel):
    input: Optional[ModerationImplicitHateInput] = None
    output: Optional[ModerationImplicitHateOutput] = None


class ModerationImplicitHateInput(ApiBaseModel):
    enabled: bool
    send_tokens: Optional[bool] = None
    threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0)


class ModerationImplicitHateOutput(ApiBaseModel):
    enabled: bool
    send_tokens: Optional[bool] = None
    threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0)


class ModerationParameters(ApiBaseModel):
    hap: Optional[ModerationHAP] = None
    social_bias: Optional[ModerationSocialBias] = None


class ModerationPosition(ApiBaseModel):
    end: int
    start: int


class ModerationSocialBias(ApiBaseModel):
    input: Optional[ModerationSocialBiasInput] = None
    output: Optional[ModerationSocialBiasOutput] = None


class ModerationSocialBiasInput(ApiBaseModel):
    enabled: Optional[bool] = False
    """
    Detects social bias.
    """
    send_tokens: Optional[bool] = False
    threshold: Optional[float] = Field(0.75, ge=0.01, le=0.99, multiple_of=0.01)
    """
    The higher the number, the more confidence that the sentence contains social bias. The threshold allows you to modify how much confidence is needed for the sentence to be flagged as containing social bias.
    """


class ModerationSocialBiasOutput(ApiBaseModel):
    enabled: Optional[bool] = False
    """
    Detects social bias.
    """
    send_tokens: Optional[bool] = False
    threshold: Optional[float] = Field(0.75, ge=0.01, le=0.99, multiple_of=0.01)
    """
    The higher the number, the more confidence that the sentence contains social bias. The threshold allows you to modify how much confidence is needed for the sentence to be flagged as containing social bias.
    """
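

# Illustrative sketch (not part of the generated schema): enabling HAP and social
# bias checks on the request input. Fields left unset keep their declared defaults,
# e.g. the 0.75 confidence threshold on ModerationHAPInput / ModerationSocialBiasInput.
def _example_moderation_parameters() -> ModerationParameters:
    """Enable HAP and social-bias checks on the request input, leaving thresholds at 0.75."""
    return ModerationParameters(
        hap=ModerationHAP(input=ModerationHAPInput(enabled=True)),
        social_bias=ModerationSocialBias(input=ModerationSocialBiasInput(enabled=True)),
    )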


class ModerationStigma(ApiBaseModel):
    input: Optional[ModerationStigmaInput] = None
    output: Optional[ModerationStigmaOutput] = None


class ModerationStigmaInput(ApiBaseModel):
    enabled: bool
    send_tokens: Optional[bool] = None
    threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0)


class ModerationStigmaOutput(ApiBaseModel):
    enabled: bool
    send_tokens: Optional[bool] = None
    threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0)


class ModerationTokens(ApiBaseModel):
    index: Optional[int] = None
    score: Optional[float] = None
    token: Optional[str] = None


class NotFoundResponse(BaseErrorResponse):
    extensions: Extensions2
    status_code: Literal[404] = 404


class _ApiKeyRegenerateCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class ApiKeyRegenerateCreateResponse(ApiBaseModel):
    result: Optional[ApiKeyResult] = None


class _ApiKeyRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class ApiKeyRetrieveResponse(ApiBaseModel):
    result: Optional[ApiKeyResult] = None


class _DeploymentCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _DeploymentCreateRequest(ApiBaseModel):
    tune_id: str


class DeploymentCreateResponse(ApiBaseModel):
    result: DeploymentResult


class _DeploymentIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _DeploymentIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class DeploymentIdRetrieveResponse(ApiBaseModel):
    result: DeploymentResult


class _DeploymentRetrieveParametersQuery(ApiBaseModel):
    id: Optional[list[str]] = None
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    version: Literal["2023-11-22"] = "2023-11-22"


class DeploymentRetrieveResponse(ApiBaseModel):
    results: list[DeploymentResult]
    total_count: float


class _EvaluationCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _EvaluationCreateRequest(ApiBaseModel):
    dataset_file_id: str
    description: Optional[str] = None
    experiment_id: Optional[str] = None
    field_operations: Optional[list[EvaluationFieldOperation]] = None
    metrics: list[str]
    model_id: Optional[str] = None
    name: str
    parameters: Optional[TextGenerationParameters] = None
    postprocessors: Optional[list[str]] = None
    prompt_id: str
    template_id: str


class EvaluationCreateResponse(ApiBaseModel):
    result: EvaluationResult


class _EvaluationExperimentCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _EvaluationExperimentCreateRequest(ApiBaseModel):
    dataset_file_id: str
    description: Optional[str] = None
    name: str
    template_id: str


class EvaluationExperimentCreateResponse(ApiBaseModel):
    result: EvaluationExperiment


class _EvaluationExperimentIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _EvaluationExperimentIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationExperimentIdRetrieveResponse(ApiBaseModel):
    result: EvaluationResult


class _EvaluationExperimentRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    sort_by: Optional[EvaluationExperimentSortBy] = None
    direction: Optional[SortDirection] = None
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationExperimentRetrieveResponse(ApiBaseModel):
    results: list[EvaluationExperiment]
    total_count: int


class _EvaluationIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _EvaluationIdInstanceResultRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationIdInstanceResultRetrieveResponse(ApiBaseModel):
    results: list[EvaluationInstanceResult]
    total_count: int


class _EvaluationIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationIdRetrieveResponse(ApiBaseModel):
    result: EvaluationResult


class _EvaluationLimitRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationLimitRetrieveResponse(ApiBaseModel):
    result: EvaluationLimit


class _EvaluationPreviewCreateParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(3, ge=1, le=10)
    version: Literal["2023-11-22"] = "2023-11-22"


class _EvaluationPreviewCreateRequest(ApiBaseModel):
    dataset_file_id: str
    description: Optional[str] = None
    experiment_id: Optional[str] = None
    field_operations: Optional[list[EvaluationFieldOperation]] = None
    metrics: list[str]
    model_id: Optional[str] = None
    name: str
    parameters: Optional[TextGenerationParameters] = None
    postprocessors: Optional[list[str]] = None
    prompt_id: str
    template_id: str


class EvaluationPreviewCreateResponse(ApiBaseModel):
    results: list[EvaluationInstanceResult]
    total_count: int


class _EvaluationRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    sort_by: Optional[EvaluationSortBy] = None
    direction: Optional[SortDirection] = None
    experiment_id: Optional[str] = None
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationRetrieveResponse(ApiBaseModel):
    results: list[EvaluationResult]
    total_count: int


class _EvaluationTemplateIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationTemplateIdRetrieveResponse(ApiBaseModel):
    result: EvaluationTemplate


class _EvaluationTemplateRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class EvaluationTemplateRetrieveResponse(ApiBaseModel):
    results: list[EvaluationTemplate]


class _FileCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-05-13"] = "2024-05-13"


class _FileCreateRequest(ApiBaseModel):
    file: bytes
    origin_id: Optional[str] = None
    purpose: FilePurpose


class FileCreateResponse(ApiBaseModel):
    result: FileResult


class _FileIdContentRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _FileIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _FileIdPatchParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _FileIdPatchRequest(ApiBaseModel):
    file: bytes


class FileIdPatchResponse(ApiBaseModel):
    result: FileResult


class _FileIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2024-05-13"] = "2024-05-13"


class FileIdRetrieveResponse(ApiBaseModel):
    result: FileResult


class _FileRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    sort_by: Optional[FileListSortBy] = None
    direction: Optional[SortDirection] = None
    search: Optional[str] = None
    purpose: Optional[FilePurpose] = None
    format_id: Optional[int] = None
    version: Literal["2024-05-13"] = "2024-05-13"


class FileRetrieveResponse(ApiBaseModel):
    results: list[FileResult]
    total_count: int


class _FolderCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _FolderCreateRequest(ApiBaseModel):
    name: str


class FolderCreateResponse(ApiBaseModel):
    result: FolderResult


class _FolderIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _FolderIdPatchParametersQuery(ApiBaseModel):
    version: Literal["2024-01-10"] = "2024-01-10"


class _FolderIdPatchRequest(ApiBaseModel):
    position: Optional[int] = Field(None, ge=1)


class FolderIdPatchResponse(ApiBaseModel):
    result: FolderResult


class _FolderIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class FolderIdRetrieveResponse(ApiBaseModel):
    result: FolderResult


class _FolderIdUpdateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _FolderIdUpdateRequest(ApiBaseModel):
    name: str


class FolderIdUpdateResponse(ApiBaseModel):
    result: FolderResult


class _FolderRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    version: Literal["2023-11-22"] = "2023-11-22"


class FolderRetrieveResponse(ApiBaseModel):
    results: list[FolderResult]
    total_count: int


class _ModelIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2024-01-30"] = "2024-01-30"


class ModelIdRetrieveResponse(ApiBaseModel):
    result: ModelIdRetrieveResult


class _ModelRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    type: Optional[ModelType] = None
    version: Literal["2023-11-22"] = "2023-11-22"


class ModelRetrieveResponse(ApiBaseModel):
    results: list[ModelRetrieveResults]
    total_count: int


class _PromptCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _PromptCreateRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    description: Optional[str] = None
    folder_id: Optional[str] = None
    industry_id: Optional[str] = None
    input: Optional[str] = None
    language_id: Optional[str] = None
    messages: Optional[list[BaseMessage]] = None
    model_id: str
    moderations: Optional[ModerationParameters] = None
    name: str
    output: Optional[str] = Field(None, min_length=1)
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = None
    task_id: Optional[str] = None
    type: Optional[PromptType] = None


class PromptCreateResponse(ApiBaseModel):
    result: PromptResult


class _PromptIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _PromptIdPatchParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _PromptIdPatchRequest(ApiBaseModel):
    folder_id: Optional[str] = None
    name: Optional[str] = None
    type: Optional[PromptType] = None


class PromptIdPatchResponse(ApiBaseModel):
    result: PromptResult


class _PromptIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class PromptIdRetrieveResponse(ApiBaseModel):
    result: PromptResult


class _PromptIdUpdateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _PromptIdUpdateRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    description: Optional[str] = None
    folder_id: Optional[str] = None
    industry_id: Optional[str] = None
    input: Optional[str] = None
    language_id: Optional[str] = None
    messages: Optional[list[BaseMessage]] = None
    model_id: str
    moderations: Optional[ModerationParameters] = None
    name: str
    output: Optional[str] = Field(None, min_length=1)
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = None
    task_id: Optional[str] = None
    type: Optional[PromptType] = None


class PromptIdUpdateResponse(ApiBaseModel):
    result: PromptResult


class _PromptRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    sort_by: Optional[PromptListSortBy] = None
    direction: Optional[SortDirection] = None
    search: Optional[str] = None
    task_id: Optional[Union[str, list[str]]] = None
    model_id: Optional[Union[str, list[str]]] = None
    source: Optional[Union[PromptListSource, list[PromptListSource]]] = None
    model_family_id: Optional[float] = None
    industry_id: Optional[Union[str, list[str]]] = None
    prompt_language_id: Optional[Union[str, list[str]]] = None
    model_type_id: Optional[Union[str, list[str]]] = None
    avg_time_min: Optional[int] = None
    avg_time_max: Optional[int] = None
    context_window_min: Optional[int] = None
    context_window_max: Optional[int] = None
    folder_id: Optional[str] = None
    version: Literal["2024-03-19"] = "2024-03-19"


class PromptRetrieveResponse(ApiBaseModel):
    results: list[PromptResult]
    total_count: int


class _RequestChatConversationIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _RequestChatConversationIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class RequestChatConversationIdRetrieveResponse(ApiBaseModel):
    results: list[RequestChatConversationIdRetrieveResults]


class _RequestIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _RequestIdFeedbackCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _RequestIdFeedbackCreateRequest(ApiBaseModel):
    categories: Optional[list[RequestFeedbackCategory]] = Field(None, min_length=1)
    comment: Optional[str] = None
    contact_consent: Optional[bool] = False
    vote: Optional[RequestFeedbackVote] = None


class RequestIdFeedbackCreateResponse(ApiBaseModel):
    result: RequestFeedbackResult


class _RequestIdFeedbackRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class RequestIdFeedbackRetrieveResponse(ApiBaseModel):
    result: RequestFeedbackResult


class _RequestIdFeedbackUpdateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _RequestIdFeedbackUpdateRequest(ApiBaseModel):
    categories: Optional[list[RequestFeedbackCategory]] = Field(None, min_length=1)
    comment: Optional[str] = None
    contact_consent: Optional[bool] = False
    vote: Optional[RequestFeedbackVote] = None


class RequestIdFeedbackUpdateResponse(ApiBaseModel):
    result: RequestFeedbackResult


class _RequestRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    status: Optional[RequestStatus] = None
    origin: Optional[RequestOrigin] = None
    before: Optional[AwareDatetime] = None
    after: Optional[AwareDatetime] = None
    endpoint: Optional[Union[RequestEndpoint, list[RequestEndpoint]]] = None
    api: Optional[RequestApiVersion] = None
    date_: Optional[date] = Field(None, alias="date")
    version: Literal["2023-11-22"] = "2023-11-22"


class RequestRetrieveResponse(ApiBaseModel):
    results: list[RequestRetrieveResults]
    total_count: int


class _SystemPromptCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _SystemPromptCreateRequest(ApiBaseModel):
    content: str
    name: str


class SystemPromptCreateResponse(ApiBaseModel):
    result: SystemPrompt


class _SystemPromptIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _SystemPromptIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class SystemPromptIdRetrieveResponse(ApiBaseModel):
    result: SystemPrompt


class _SystemPromptIdUpdateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _SystemPromptIdUpdateRequest(ApiBaseModel):
    content: str
    name: str


class SystemPromptIdUpdateResponse(ApiBaseModel):
    result: SystemPrompt


class _SystemPromptRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    version: Literal["2023-11-22"] = "2023-11-22"


class SystemPromptRetrieveResponse(ApiBaseModel):
    results: list[SystemPrompt]
    total_count: int


class _TagRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    type: Optional[TagType] = None
    version: Literal["2023-11-22"] = "2023-11-22"


class TagRetrieveResponse(ApiBaseModel):
    results: list[Tag]


class _TaskRetrieveParametersQuery(ApiBaseModel):
    tune: Optional[bool] = True
    version: Literal["2023-11-22"] = "2023-11-22"


class TaskRetrieveResponse(ApiBaseModel):
    results: list[Tasks]


class _TextChatCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextChatCreateRequest(ApiBaseModel):
    conversation_id: Optional[str] = None
    messages: Optional[list[BaseMessage]] = Field(None, min_length=1)
    model_id: Optional[str] = None
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    parent_id: Optional[str] = None
    prompt_id: Optional[str] = None
    prompt_template_id: Optional[str] = None
    trim_method: Optional[TrimMethod] = None
    use_conversation_parameters: Optional[bool] = None


class TextChatCreateResponse(ApiBaseModel):
    conversation_id: str
    created_at: Optional[AwareDatetime] = None
    id: Optional[str] = None
    input_parameters: Optional[dict[str, Any]] = None
    model_id: Optional[str] = None
    results: list[TextGenerationResult]


class _TextChatOutputCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextChatOutputCreateRequest(ApiBaseModel):
    conversation_id: Optional[str] = None
    messages: Optional[list[BaseMessage]] = Field(None, min_length=1)
    model_id: Optional[str] = None
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    parent_id: Optional[str] = None
    prompt_id: Optional[str] = None
    prompt_template_id: Optional[str] = None
    trim_method: Optional[TrimMethod] = None
    use_conversation_parameters: Optional[bool] = None


class TextChatOutputCreateResponse(ApiBaseModel):
    result: str


class _TextChatStreamCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextChatStreamCreateRequest(ApiBaseModel):
    conversation_id: Optional[str] = None
    messages: Optional[list[BaseMessage]] = Field(None, min_length=1)
    model_id: Optional[str] = None
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    parent_id: Optional[str] = None
    prompt_id: Optional[str] = None
    prompt_template_id: Optional[str] = None
    trim_method: Optional[TrimMethod] = None
    use_conversation_parameters: Optional[bool] = None


class TextChatStreamCreateResponse(ApiBaseModel):
    conversation_id: str
    created_at: Optional[AwareDatetime] = None
    id: Optional[str] = None
    input_parameters: Optional[dict[str, Any]] = None
    model_id: Optional[str] = None
    moderations: Optional[TextCreateResponseModeration] = None
    results: Optional[list[TextChatGenerationStreamResult]] = None


class _TextClassificationCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TextClassificationCreateRequest(ApiBaseModel):
    data: list[TextClassificationCreateData] = Field(..., min_length=1)
    input: str
    model_id: str


class TextClassificationCreateResponse(ApiBaseModel):
    result: TextClassificationResult


class _TextEmbeddingCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-04-15"] = "2024-04-15"


class _TextEmbeddingCreateRequest(ApiBaseModel):
    input: Union[str, Input]
    model_id: str
    parameters: Optional[TextEmbeddingParameters] = None


class TextEmbeddingCreateResponse(ApiBaseModel):
    created_at: AwareDatetime
    model_id: str
    results: list[_TextEmbeddingCreateResults]


class _TextEmbeddingLimitRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class TextEmbeddingLimitRetrieveResponse(ApiBaseModel):
    result: TextEmbeddingLimit


class _TextExtractionLimitRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TextGenerationComparisonCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextGenerationComparisonCreateRequest(ApiBaseModel):
    compare_parameters: TextGenerationComparisonParameters
    name: Optional[str] = None
    request: TextGenerationComparisonCreateRequestRequest


class TextGenerationComparisonCreateResponse(ApiBaseModel):
    results: list[TextGenerationComparisonCreateResults]


class _TextGenerationCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextGenerationCreateRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    input: Optional[str] = Field(None, examples=["How are you"], title="Input string")
    """
    The input is the prompt to generate completions, passed as a string. Note: The method tokenizes the input internally. It is recommended not to leave any trailing spaces.
    """
    model_id: Optional[str] = Field(None, title="Model ID")
    """
    The ID of the model or tune to be used for this request.
    """
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = Field(None, min_length=1, title="Saved prompt Id")


class TextGenerationCreateResponse(ApiBaseModel):
    created_at: AwareDatetime
    id: str
    input_parameters: Optional[dict[str, Any]] = None
    model_id: str
    results: list[TextGenerationResult]


class _TextGenerationIdFeedbackCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-02-20"] = "2024-02-20"


class _TextGenerationIdFeedbackCreateRequest(ApiBaseModel):
    categories: Optional[list[TextGenerationFeedbackCategory]] = Field(None, min_length=1)
    comment: Optional[str] = None
    contact_consent: Optional[bool] = False
    vote: Optional[TextGenerationFeedbackVote] = None


class TextGenerationIdFeedbackCreateResponse(ApiBaseModel):
    result: TextGenerationFeedbackResult


class _TextGenerationIdFeedbackRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class TextGenerationIdFeedbackRetrieveResponse(ApiBaseModel):
    result: TextGenerationFeedbackResult


class _TextGenerationIdFeedbackUpdateParametersQuery(ApiBaseModel):
    version: Literal["2024-02-20"] = "2024-02-20"


class _TextGenerationIdFeedbackUpdateRequest(ApiBaseModel):
    categories: Optional[list[TextGenerationFeedbackCategory]] = Field(None, min_length=1)
    comment: Optional[str] = None
    contact_consent: Optional[bool] = False
    vote: Optional[TextGenerationFeedbackVote] = None


class TextGenerationIdFeedbackUpdateResponse(ApiBaseModel):
    result: TextGenerationFeedbackResult


class _TextGenerationLimitRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class TextGenerationLimitRetrieveResponse(ApiBaseModel):
    result: TextGenerationLimit


class _TextGenerationOutputCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextGenerationOutputCreateRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    input: Optional[str] = None
    model_id: Optional[str] = None
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = None
    use_default: Optional[bool] = None


class TextGenerationOutputCreateResponse(ApiBaseModel):
    results: list[str]


class _TextGenerationStreamCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextGenerationStreamCreateRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    input: Optional[str] = Field(None, examples=["How are you"], title="Input string")
    """
    The input is the prompt to generate completions, passed as a string. Note: The method tokenizes the input internally. It is recommended not to leave any trailing spaces.
    """
    model_id: Optional[str] = Field(None, title="Model ID")
    """
    The ID of the model or tune to be used for this request.
    """
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = Field(None, min_length=1, title="Saved prompt Id")


class TextGenerationStreamCreateResponse(ApiBaseModel):
    created_at: Optional[AwareDatetime] = None
    id: Optional[str] = None
    input_parameters: Optional[dict[str, Any]] = None
    model_id: str
    moderations: Optional[TextCreateResponseModeration] = None
    results: Optional[list[TextGenerationStreamResult]] = None


class _TextModerationCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-03-19"] = "2024-03-19"


class _TextModerationCreateRequest(ApiBaseModel):
    hap: Optional[HAPOptions] = None
    input: str = Field(..., max_length=20480)
    social_bias: Optional[SocialBiasOptions] = None


class TextModerationCreateResponse(ApiBaseModel):
    results: list[TextCreateResponseModeration]


class _TextRerankCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TextRerankCreateRequest(ApiBaseModel):
    documents: list[str] = Field(..., min_length=1)
    model_id: str
    parameters: Optional[TextRerankParameters] = None
    query: str


class TextRerankCreateResponse(ApiBaseModel):
    result: _TextRerankCreateResult


class _TextSentenceSimilarityCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TextSentenceSimilarityCreateRequest(ApiBaseModel):
    model_id: str
    parameters: Optional[TextSentenceSimilarityParameters] = None
    sentences: list[str] = Field(..., min_length=1)
    source_sentence: str


class TextSentenceSimilarityCreateResponse(ApiBaseModel):
    results: list[TextSentenceSimilarityCreateResult]


class _TextTokenizationCreateParametersQuery(ApiBaseModel):
    version: Literal["2024-01-10"] = "2024-01-10"


class _TextTokenizationCreateRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    input: Optional[Union[str, list[str]]] = None
    model_id: Optional[str] = None
    parameters: Optional[TextTokenizationParameters] = None
    prompt_id: Optional[str] = None


class TextTokenizationCreateResponse(ApiBaseModel):
    created_at: str
    model_id: str
    results: list[TextTokenizationCreateResults]


class _TimeSerieForecastingCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TimeSerieForecastingCreateRequest(ApiBaseModel):
    conditional_columns: Optional[list[str]] = Field(None, max_length=500, min_length=0, title="Conditional Columns")
    """
    An optional array of column headings which constitute the conditional variables.
    """
    context_length: Optional[int] = Field(None, title="Context Length")
    """
    Context length of the forecast.
    """
    control_columns: Optional[list[str]] = Field(None, max_length=500, min_length=0, title="Control Columns")
    """
    An optional array of column headings which constitute the control variables.
    """
    data: str = Field(..., max_length=50000, min_length=1, title="Data")
    """
    Base64 encoded string of data.
    """
    future_data: Optional[str] = Field(None, max_length=50000, min_length=0, title="Future Data")
    """
    Base64 encoded string of data for future supporting features.
    """
    id_columns: Optional[list[str]] = Field(None, max_length=10, min_length=0, title="Id Columns")
    """
    Columns that define a unique key for time series.
    """
    model_id: str
    observable_columns: Optional[list[str]] = Field(None, max_length=500, min_length=0, title="Observable Columns")
    """
    An optional array of column headings which constitute the observable variables.
    """
    prediction_length: Optional[int] = Field(None, title="Prediction Length")
    """
    The prediction length for the forecast.
    """
    static_categorical_columns: Optional[list[str]] = Field(
        None, max_length=500, min_length=0, title="Static Categorical Columns"
    )
    """
    An optional array of column headings which constitute the static categorical variables.
    """
    target_columns: Optional[list[str]] = Field(None, max_length=500, min_length=0, title="Target Columns")
    """
    An array of column headings which constitute the target variables.
    """
    timestamp_column: str = Field(..., max_length=100, min_length=1, title="Timestamp Column")
    """
    A valid column in the data that should be treated as the timestamp.
    """


class TimeSerieForecastingCreateResponse(ApiBaseModel):
    result: _TimeSerieForecastingCreateResult


class _TimeSerieLimitRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class TimeSerieLimitRetrieveResponse(ApiBaseModel):
    result: TimeSeriesLimit


class _TuneCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TuneCreateRequest(ApiBaseModel):
    evaluation_file_ids: Optional[list[str]] = None
    model_id: str
    name: str
    parameters: Optional[TuneParameters] = None
    task_id: str
    training_file_ids: list[str]
    tuning_type: str
    validation_file_ids: Optional[list[str]] = None


class TuneCreateResponse(ApiBaseModel):
    result: TuneResult


class _TuneFromFileCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TuneFromFileCreateRequest(ApiBaseModel):
    file_id: str
    name: str


class TuneFromFileCreateResponse(ApiBaseModel):
    result: TuneResult


class _TuneIdContentRetrieveParametersQuery(ApiBaseModel):
    content: str
    version: Literal["2023-11-22"] = "2023-11-22"


class _TuneIdContentTypeRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-12-15"] = "2023-12-15"


class _TuneIdDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TuneIdPatchParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TuneIdPatchRequest(ApiBaseModel):
    name: Optional[str] = None
    preferred: Optional[bool] = None


class TuneIdPatchResponse(ApiBaseModel):
    result: TuneResult


class _TuneIdRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class TuneIdRetrieveResponse(ApiBaseModel):
    result: TuneResult


class _TunePreflightCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _TunePreflightCreateRequest(ApiBaseModel):
    evaluation_file_ids: Optional[list[str]] = None
    model_id: str
    name: str
    parameters: Optional[TuneParameters] = None
    task_id: str
    training_file_ids: list[str]
    tuning_type: str
    validation_file_ids: Optional[list[str]] = None


class TunePreflightCreateResponse(ApiBaseModel):
    result: _TunePreflightCreateResult


class _TuneRetrieveParametersQuery(ApiBaseModel):
    limit: Optional[int] = Field(100, ge=1, le=100)
    offset: Optional[int] = Field(0, ge=0)
    status: Optional[TuneStatus] = None
    search: Optional[str] = None
    sort_by: Optional[TuneListSortBy] = None
    direction: Optional[SortDirection] = None
    version: Literal["2023-11-22"] = "2023-11-22"


class TuneRetrieveResponse(ApiBaseModel):
    results: list[TuneResult]
    total_count: int


class _TuningTypeRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2024-01-30"] = "2024-01-30"


class TuningTypeRetrieveResponse(ApiBaseModel):
    results: list[TuningTypeRetrieveResults]


class _UserCreateParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _UserCreateRequest(ApiBaseModel):
    first_name: Optional[str] = None
    last_name: Optional[str] = None


class UserCreateResponse(ApiBaseModel):
    result: UserCreateResult


class _UserDeleteParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _UserPatchParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class _UserPatchRequest(ApiBaseModel):
    data_usage_consent: Optional[bool] = None
    tou_accepted: Optional[bool] = None


class UserPatchResponse(ApiBaseModel):
    result: UserResult


class _UserRetrieveParametersQuery(ApiBaseModel):
    version: Literal["2023-11-22"] = "2023-11-22"


class UserRetrieveResponse(ApiBaseModel):
    result: UserResult


class _TextEmbeddingCreateRequestParametersReturnOptions(ApiBaseModel):
    input_text: Optional[bool] = None


class _TextEmbeddingCreateResults(ApiBaseModel):
    embedding: list[float]
    input_text: Optional[str] = None


class _TextRerankCreateResult(ApiBaseModel):
    query: Optional[str] = None
    results: list[TextRerankResult]


class _TimeSerieForecastingCreateResult(ApiBaseModel):
    predictions: str


class _TunePreflightCreateResult(ApiBaseModel):
    training_file_preview: Optional[list[dict[str, Any]]] = None
    validation_file_preview: Optional[list[dict[str, Any]]] = None


class PromptListSortBy(str, Enum):
    TYPE = "type"
    MODEL_TASK = "model_task"
    UPDATED_AT = "updated_at"
    CREATED_AT = "created_at"
    NAME = "name"
    ID = "id"
    MODEL = "model"
    USAGE_COUNT = "usage_count"


class PromptListSource(str, Enum):
    USER = "user"
    EXAMPLE = "example"
    COMMUNITY = "community"


class PromptModerationParameters(ModerationParameters):
    hap: Optional[ModerationHAP] = None
    implicit_hate: Optional[ModerationImplicitHate] = None
    social_bias: Optional[ModerationSocialBias] = None
    stigma: Optional[ModerationStigma] = None


class PromptResult(ApiBaseModel):
    author: Optional[PromptResultAuthor] = None
    created_at: AwareDatetime
    data: Optional[dict[str, Any]] = None
    description: Optional[str] = None
    folder_id: Optional[str] = None
    id: str
    input: Optional[str] = None
    messages: Optional[list[BaseMessage]] = None
    metadata: Optional[dict[str, Any]] = None
    model_id: Optional[str] = None
    moderations: Optional[PromptModerationParameters] = None
    name: str
    output: Optional[str] = None
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = None
    public: Optional[bool] = None
    tags: Optional[list[PromptTag]] = None
    task: Optional[PromptResultTask] = None
    type: PromptType
    updated_at: Optional[AwareDatetime] = None
    usage_count: int


class PromptResultAuthor(ApiBaseModel):
    first_name: Optional[str] = None
    id: Optional[int] = None
    last_name: Optional[str] = None


class PromptResultTask(ApiBaseModel):
    icon: Optional[str] = None
    id: Optional[str] = None
    name: Optional[str] = None


class PromptTag(ApiBaseModel):
    id: str
    name: str
    type: PromptTagType


class PromptTagType(str, Enum):
    LANGUAGE = "language"
    INDUSTRY = "industry"
    MODEL_TYPE = "model_type"


class PromptTemplateData(ApiBaseModel):
    example_file_ids: Optional[list[str]] = Field(None, max_length=5, min_length=0)


class PromptType(str, Enum):
    PRIVATE = "private"
    PUBLIC = "public"
    COMMUNITY = "community"
    EXAMPLE = "example"


class RequestApiVersion(str, Enum):
    V0 = "v0"
    V1 = "v1"
    V2 = "v2"


class RequestChatConversationIdRetrieveResults(ApiBaseModel):
    created_at: AwareDatetime
    duration: int
    id: str
    parent_id: Optional[str] = None
    request: Optional[RequestChatConversationIdRetrieveResultsRequest] = None
    response: Optional[RequestChatConversationIdRetrieveResultsResponse] = None
    status: RequestStatus
    version: Optional[RequestResultVersion] = None


class RequestChatConversationIdRetrieveResultsRequest(ApiBaseModel):
    pass


class RequestChatConversationIdRetrieveResultsResponse(ApiBaseModel):
    pass


class RequestEndpoint(str, Enum):
    GENERATE = "generate"
    COMPARE = "compare"
    CHAT = "chat"


class RequestFeedbackCategory(str, Enum):
    INACCURATE = "inaccurate"
    NOT_RELEVANT = "not_relevant"
    OFFENSIVE_HARMFUL = "offensive_harmful"
    KNOWLEDGE_GAP = "knowledge_gap"
    OTHER_CONTENT = "other_content"
    TOO_LONG = "too_long"
    TOO_SHORT = "too_short"
    WRONG_TONE = "wrong_tone"
    WRONG_FORMAT = "wrong_format"
    OTHER_STYLE = "other_style"
    CORRECT_CONTENT = "correct_content"
    CORRECT_STYLE = "correct_style"


class RequestFeedbackResult(ApiBaseModel):
    api_request: str
    categories: list[str]
    comment: Optional[str] = None
    contact_consent: bool
    created_at: AwareDatetime
    id: int
    updated_at: AwareDatetime
    vote: Optional[RequestFeedbackVote] = None


class RequestFeedbackVote(str, Enum):
    UP = "up"
    DOWN = "down"


class RequestOrigin(str, Enum):
    API = "api"
    UI = "ui"


class RequestResultVersion(ApiBaseModel):
    api: Optional[str] = None
    date_: Optional[date] = Field(None, alias="date")


class RequestRetrieveResults(ApiBaseModel):
    created_at: AwareDatetime
    duration: int
    id: str
    request: Optional[dict[str, Any]] = None
    response: Optional[dict[str, Any]] = None
    status: RequestStatus
    version: Optional[RequestResultVersion] = None


class RequestStatus(str, Enum):
    SUCCESS = "success"
    ERROR = "error"


class SocialBiasOptions(ApiBaseModel):
    send_tokens: Optional[bool] = False
    threshold: Optional[float] = Field(0.75, gt=0.0, lt=1.0)


class SortDirection(str, Enum):
    ASC = "asc"
    DESC = "desc"


class StopReason(str, Enum):
    NOT_FINISHED = "not_finished"
    MAX_TOKENS = "max_tokens"
    EOS_TOKEN = "eos_token"
    CANCELLED = "cancelled"
    TIME_LIMIT = "time_limit"
    STOP_SEQUENCE = "stop_sequence"
    TOKEN_LIMIT = "token_limit"
    ERROR = "error"


class StorageProviderLocation(str, Enum):
    US_SOUTH = "us-south"
    US_EAST = "us-east"


class SystemPrompt(ApiBaseModel):
    author: Optional[SystemPromptAuthor] = None
    content: str
    created_at: AwareDatetime
    id: int
    name: str
    type: SystemPromptType


class SystemPromptAuthor(ApiBaseModel):
    first_name: Optional[str] = None
    id: int
    last_name: Optional[str] = None


class SystemPromptType(str, Enum):
    PRIVATE = "private"
    SYSTEM = "system"


class Tag(ApiBaseModel):
    id: str
    name: str
    type: TagType


class TagType(str, Enum):
    LANGUAGE = "language"
    INDUSTRY = "industry"
    MODEL_TYPE = "model_type"


class Tasks(ApiBaseModel):
    categorization: bool
    csv_example: Optional[str] = None
    file_format_id: Optional[int] = None
    id: str
    json_example: Optional[str] = None
    jsonl_example: Optional[str] = None
    name: str
    tune: bool
    verbalizer: Optional[str] = None


class TextChatGenerationStreamResult(ApiBaseModel):
    generated_text: str
    generated_token_count: int
    generated_tokens: Optional[list[BaseTokens]] = None
    input_text: Optional[str] = None
    input_token_count: Optional[int] = None
    input_tokens: Optional[list[BaseTokens]] = None
    seed: Optional[float] = None
    stop_reason: StopReason
    stop_sequence: Optional[str] = None


class TextClassificationCreateData(ApiBaseModel):
    labels: list[str]
    text: str


class TextClassificationResult(ApiBaseModel):
    classification_type: TextClassificationType
    log_likelihood: dict[str, float]
    model_input: str
    model_output: str
    predictions: list[str]


class TextClassificationType(str, Enum):
    MULTI_CLASS = "multi_class"
    MULTI_LABEL = "multi_label"
    BINARY = "binary"


class TextCreateResponseModeration(ApiBaseModel):
    hap: Optional[list[TextModeration]] = None
    social_bias: Optional[list[TextModeration]] = None


class TextEmbeddingLimit(ApiBaseModel):
    concurrency: ConcurrencyLimit


class TextEmbeddingParameters(ApiBaseModel):
    return_options: Optional[_TextEmbeddingCreateRequestParametersReturnOptions] = None
    truncate_input_tokens: Optional[bool] = None


class TextGenerationComparisonCreateRequestRequest(ApiBaseModel):
    data: Optional[PromptTemplateData] = None
    input: str
    model_id: Optional[str] = None
    moderations: Optional[ModerationParameters] = None
    parameters: Optional[TextGenerationParameters] = None
    prompt_id: Optional[str] = None
    use_default: Optional[bool] = None


class TextGenerationComparisonCreateResults(ApiBaseModel):
    error: Optional[Any] = None
    parameters: TextGenerationComparisonCreateResultsParameters
    result: Optional[TextGenerationComparisonCreateResultsResult] = None


class TextGenerationComparisonCreateResultsParameters(ApiBaseModel):
    length_penalty: Optional[dict[str, Any]] = None
    model_id: Optional[str] = None
    repetition_penalty: Optional[float] = None
    temperature: Optional[float] = None
    top_k: Optional[int] = None
    top_p: Optional[float] = None
    typical_p: Optional[float] = None


class TextGenerationComparisonCreateResultsResult(ApiBaseModel):
    created_at: AwareDatetime
    id: str
    input_parameters: Optional[dict[str, Any]] = None
    model_id: str
    results: list[TextGenerationResult]


class TextGenerationComparisonParameters(ApiBaseModel):
    length_penalty: Optional[list[dict[str, Any]]] = Field(None, max_length=10, min_length=1)
    model_id: Optional[list[str]] = Field(None, max_length=10, min_length=1)
    repetition_penalty: Optional[list[float]] = Field(None, max_length=10, min_length=1)
    temperature: Optional[list[float]] = Field(None, max_length=10, min_length=1)
    top_k: Optional[list[int]] = Field(None, max_length=10, min_length=1)
    top_p: Optional[list[float]] = Field(None, max_length=10, min_length=1)
    typical_p: Optional[list[float]] = Field(None, max_length=10, min_length=1)


class TextGenerationFeedbackCategory(str, Enum):
    INACCURATE = "inaccurate"
    NOT_RELEVANT = "not_relevant"
    OFFENSIVE_HARMFUL = "offensive_harmful"
    KNOWLEDGE_GAP = "knowledge_gap"
    OTHER_CONTENT = "other_content"
    TOO_LONG = "too_long"
    TOO_SHORT = "too_short"
    WRONG_TONE = "wrong_tone"
    WRONG_FORMAT = "wrong_format"
    OTHER_STYLE = "other_style"
    CORRECT_CONTENT = "correct_content"
    CORRECT_STYLE = "correct_style"


class TextGenerationFeedbackResult(ApiBaseModel):
    api_request: str
    categories: list[str]
    comment: Optional[str] = None
    contact_consent: bool
    created_at: AwareDatetime
    id: int
    updated_at: AwareDatetime
    vote: Optional[TextGenerationFeedbackVote] = None


class TextGenerationFeedbackVote(str, Enum):
    UP = "up"
    DOWN = "down"


class TextGenerationLimit(ApiBaseModel):
    concurrency: ConcurrencyLimit


class TextGenerationParameters(ApiBaseModel):
    beam_width: Optional[int] = Field(None, ge=0, le=3, title="Beam width")
    """
    At each step, or token, the algorithm keeps track of the n (of 1, 2, or 3) most probable sequences (beams) and selects the one with the highest probability. This continues until the stop sequence is met.
    """
    decoding_method: Optional[DecodingMethod] = None
    include_stop_sequence: Optional[bool] = None
    length_penalty: Optional[LengthPenalty] = None
    max_new_tokens: Optional[int] = Field(None, ge=0, title="Max new tokens")
    """
    Define the maximum number of tokens to generate.
    """
    min_new_tokens: Optional[int] = Field(None, ge=0, title="Min new tokens")
    """
    If stop sequences are given, they are ignored until minimum tokens are generated.
    """
    random_seed: Optional[int] = Field(None, ge=1, le=4294967295, title="Random seed")
    """
    Controls the random sampling of the generated tokens when sampling is enabled. Setting the random seed to the same number for each generation ensures experimental repeatability.
    """
    repetition_penalty: Optional[float] = Field(None, ge=1.0, le=2.0, multiple_of=0.01, title="Repetition penalty")
    """
    The parameter for repetition penalty. 1.00 means no penalty.
    """
    return_options: Optional[TextGenerationReturnOptions] = None
    stop_sequences: Optional[list[str]] = Field(
        None, examples=['[" and "]'], max_length=6, min_length=1, title="Stop sequences"
    )
    """
    Stop sequences are one or more strings which will cause the text generation to stop if/when they are produced as part of the output. Stop sequences encountered prior to the minimum number of tokens being generated will be ignored.
    """
    temperature: Optional[float] = Field(None, ge=0.0, le=2.0, multiple_of=0.01, title="Temperature")
    """
    Control the creativity of generated text. Higher values will lead to more randomly generated outputs.
    """
    time_limit: Optional[int] = Field(None, title="Time limit")
    """
    Time limit in milliseconds - if not completed within this time, generation will stop. The text generated so far will be returned along with the `TIME_LIMIT` stop reason.
    """
    top_k: Optional[int] = Field(None, ge=1, le=100, title="Top K")
    """
    Set the number of highest probability vocabulary tokens to keep for top-k-filtering. Lower values make it less likely the model will go off topic.
    """
    top_p: Optional[float] = Field(None, ge=0.0, le=1.0, multiple_of=0.01, title="Top P (nucleus sampling)")
    """
    If < 1.0, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are used.
    """
    truncate_input_tokens: Optional[int] = Field(None, ge=0, title="Truncate input tokens")
    """
    Truncate to this many input tokens. Can be used to avoid requests failing due to input being longer than configured limits. Zero means don't truncate.
    """
    typical_p: Optional[float] = Field(None, ge=0.01, le=1.0, multiple_of=0.01, title="Typical P")
    """
    Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to typical_p or higher are kept for generation. 1.00 means a neutral value.
    """
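

# Illustrative sketch (not part of the generated schema): one way the generation
# parameters documented above might be assembled. Values are arbitrary but within
# the declared bounds; temperature and top_p are simply left unset here.
def _example_generation_parameters() -> TextGenerationParameters:
    """Sample up to 200 new tokens with top-k filtering and a reproducible seed."""
    return TextGenerationParameters(
        decoding_method=DecodingMethod.SAMPLE,
        max_new_tokens=200,
        min_new_tokens=1,
        top_k=50,
        random_seed=42,
        stop_sequences=["\n\n"],
        time_limit=30_000,  # milliseconds
        length_penalty=LengthPenalty(decay_factor=1.2, start_index=50),
        return_options=TextGenerationReturnOptions(generated_tokens=True, input_text=True),
    )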
[docs] class TextGenerationResult(ApiBaseModel): generated_text: str generated_token_count: int generated_tokens: Optional[list[BaseTokens]] = None input_text: Optional[str] = None input_token_count: Optional[int] = None input_tokens: Optional[list[BaseTokens]] = None moderations: Optional[TextCreateResponseModeration] = None seed: Optional[float] = None stop_reason: StopReason stop_sequence: Optional[str] = None
[docs] class TextGenerationReturnOptions(ApiBaseModel): generated_tokens: Optional[bool] = Field(False, title="Generated Tokens") """ Include list of individual generated tokens """ input_parameters: Optional[bool] = None input_text: Optional[bool] = Field(False, title="Input text") """ Include input text """ input_tokens: Optional[bool] = Field(False, title="Input Tokens") """ Include list of input tokens """ token_logprobs: Optional[bool] = Field(False, title="Token logprobs") """ Include logprob for each returned token """ token_ranks: Optional[bool] = Field(False, title="Token ranks") """ Include rank of each returned token """ top_n_tokens: Optional[int] = Field(None, ge=0, le=5, title="Top N tokens") """ Include top n candidate tokens at the position of each returned token """
[docs] class TextGenerationStreamResult(ApiBaseModel): generated_text: str generated_token_count: int generated_tokens: Optional[list[BaseTokens]] = None input_text: Optional[str] = None input_token_count: Optional[int] = None input_tokens: Optional[list[BaseTokens]] = None seed: Optional[float] = None stop_reason: StopReason stop_sequence: Optional[str] = None
[docs] class TextModeration(ApiBaseModel): flagged: bool position: ModerationPosition score: float success: bool tokens: Optional[list[ModerationTokens]] = None
[docs] class TextRerankParameters(ApiBaseModel): return_options: Optional[TextRerankReturnOptions] = None truncate_input_tokens: Optional[bool] = None
[docs] class TextRerankResult(ApiBaseModel): score: float
[docs] class TextRerankReturnOptions(ApiBaseModel): documents: Optional[bool] = None query: Optional[bool] = None top_n: Optional[float] = Field(None, ge=1.0)
[docs] class TextSentenceSimilarityCreateResult(ApiBaseModel): score: float
[docs] class TextSentenceSimilarityParameters(ApiBaseModel): truncate_input_tokens: Optional[bool] = None
[docs] class TextTokenizationCreateResults(ApiBaseModel): input_text: Optional[str] = None token_count: int tokens: Optional[list[str]] = None
[docs] class TextTokenizationParameters(ApiBaseModel): return_options: Optional[TextTokenizationReturnOptions] = None
[docs] class TextTokenizationReturnOptions(ApiBaseModel): input_text: Optional[bool] = None tokens: Optional[bool] = None
[docs] class TimeSeriesLimit(ApiBaseModel): concurrency: ConcurrencyLimit
[docs] class TooManyRequestsResponse(BaseErrorResponse):
    extensions: Extensions3
    status_code: Literal[429] = 429
[docs] class TrimMethod(str, Enum):
    FLOATING_WINDOW = "floating_window"
    NONE = "none"
[docs] class TuneAssetType(str, Enum):
    VECTORS = "vectors"
    LOGS = "logs"
    EXPORT = "export"
[docs] class TuneListSortBy(str, Enum):
    STATUS = "status"
    CREATED_AT = "created_at"
    NAME = "name"
    ID = "id"
    MODEL = "model"
[docs] class TuneParameters(ApiBaseModel):
    accumulate_steps: Optional[int] = None
    batch_size: Optional[int] = None
    learning_rate: Optional[float] = None
    max_input_tokens: Optional[int] = None
    max_output_tokens: Optional[int] = None
    num_epochs: Optional[int] = None
    num_virtual_tokens: Optional[int] = None
    verbalizer: Optional[str] = None
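# Minimal sketch (illustrative): hyperparameters for a tuning job expressed with the TuneParameters
# model above. All fields are optional; the concrete values below are placeholders, not recommendations.
from genai.schema import TuneParameters

tune_params = TuneParameters(
    batch_size=16,
    learning_rate=0.3,
    num_epochs=10,
    num_virtual_tokens=100,
    accumulate_steps=4,
)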
[docs] class TuneResult(ApiBaseModel):
    contents: Optional[list[TuneResultContent]] = None
    created_at: AwareDatetime
    datapoints: Optional[TuneResultDatapoint] = None
    evaluation_files: Optional[list[TuneResultFiles]] = None
    finished_at: Optional[AwareDatetime] = None
    id: str
    last_used_at: Optional[AwareDatetime] = None
    model_id: str
    model_name: str
    name: str
    parameters: Optional[dict[str, Any]] = None
    preferred: bool
    started_at: Optional[AwareDatetime] = None
    status: TuneStatus
    status_message: Optional[str] = None
    task_id: str
    task_name: str
    training_files: Optional[list[TuneResultFiles]] = None
    tuning_type: str
    validation_files: Optional[list[TuneResultFiles]] = None
    vectors: Optional[str] = None
[docs] class TuneResultContent(ApiBaseModel):
    name: str
[docs] class TuneResultDatapoint(ApiBaseModel):
    loss: list[TunesResultDatapointLoss]
    validation_loss: Optional[list[TuneResultDatapointValidationLoss]] = None
[docs] class TuneResultDatapointLossData(ApiBaseModel):
    epoch: int
    step: Optional[int] = None
    value: float

    @field_validator("epoch", mode="before")
    @classmethod
    def _validate_epoch(cls, value: Any):
        result_value = int(value)
        if result_value != float(value):
            warnings.warn(f"The epoch was rounded down from {value} to {result_value}", stacklevel=4)
        return result_value
[docs] class TuneResultDatapointValidationLoss(ApiBaseModel):
    data: TuneResultDatapointValidationLossData
    timestamp: AwareDatetime
[docs] class TuneResultDatapointValidationLossData(ApiBaseModel):
    epoch: int
    step: Optional[int] = None
    value: float

    @field_validator("epoch", mode="before")
    @classmethod
    def _validate_epoch(cls, value: Any):
        result_value = int(value)
        if result_value != float(value):
            warnings.warn(f"The epoch was rounded down from {value} to {result_value}", stacklevel=4)
        return result_value
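# Behavior sketch (illustrative): the "before" validator on `epoch` coerces a fractional epoch to int
# and emits a warning instead of failing validation. The same logic applies to both
# TuneResultDatapointLossData and TuneResultDatapointValidationLossData; import path assumes re-export
# from genai.schema.
import warnings

from genai.schema import TuneResultDatapointLossData

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    point = TuneResultDatapointLossData(epoch=1.5, value=0.42)

assert point.epoch == 1  # rounded down from 1.5
assert any("rounded down" in str(w.message) for w in caught)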
[docs] class TuneResultFiles(ApiBaseModel):
    created_at: Optional[AwareDatetime] = None
    file_name: str
    id: str
[docs] class TuneStatus(str, Enum):
    INITIALIZING = "initializing"
    NOT_STARTED = "not_started"
    PENDING = "pending"
    HALTED = "halted"
    RUNNING = "running"
    QUEUED = "queued"
    COMPLETED = "completed"
    FAILED = "failed"
[docs] class TunesResultDatapointLoss(ApiBaseModel):
    data: TuneResultDatapointLossData
    timestamp: AwareDatetime
[docs] class TuningTypeRetrieveResults(ApiBaseModel):
    id: Optional[str] = None
    model_ids: Optional[list[str]] = None
    name: Optional[str] = None
    schema_: Optional[dict[str, Any]] = Field(None, alias="schema")
    """
    JSON Schema
    """
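# Alias sketch (illustrative): the API field is named "schema", so the generated model stores it as
# `schema_` with alias="schema" (presumably to avoid clashing with pydantic's own attribute names).
# Parsing a raw payload uses the alias, while attribute access uses the Python name; the payload
# below is made up for illustration.
from genai.schema import TuningTypeRetrieveResults

raw = {"id": "prompt_tuning", "name": "Prompt tuning", "schema": {"type": "object"}}
tuning_type = TuningTypeRetrieveResults.model_validate(raw)

assert tuning_type.schema_ == {"type": "object"}
assert "schema" in tuning_type.model_dump(by_alias=True)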
[docs] class UnauthorizedResponse(BaseErrorResponse):
    extensions: Extensions4
    status_code: Literal[401] = 401
[docs] class UnavailableResponse(BaseErrorResponse):
    extensions: Extensions5
    status_code: Literal[503] = 503
[docs] class UserApiKey(ApiBaseModel):
    created_at: str
    generated_at: str
    last_used_at: Optional[str] = None
    value: str
[docs] class UserCreateResult(ApiBaseModel):
    api_key: UserApiKey
    data_usage_consent: bool
    email: str
    first_name: Optional[str] = None
    generate_default: Optional[UserGenerationDefault] = None
    id: int
    last_name: Optional[str] = None
    tou_accepted: bool
    tou_accepted_at: Optional[str] = None
    user_id: str
[docs] class UserGenerationDefault(ApiBaseModel):
    model_id: Optional[str] = None
    parameters: Optional[TextGenerationParameters] = None
[docs] class UserResult(ApiBaseModel):
    data_usage_consent: bool
    email: str
    first_name: Optional[str] = None
    generate_default: Optional[UserGenerationDefault] = None
    id: int
    last_name: Optional[str] = None
    tou_accepted: bool
    tou_accepted_at: Optional[str] = None
[docs] class BadRequestResponse(BaseErrorResponse):
    extensions: Extensions
    status_code: Literal[400] = 400
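# Dispatch sketch (illustrative): every error response model pins `status_code` as a Literal, so a
# parsed error can be handled by subclass or by status code. `describe_error` is a made-up helper;
# the response object is assumed to be an already validated BaseErrorResponse subclass obtained from
# an API call (none is constructed here, since the Extensions* models carry their own fields defined
# elsewhere in this module).
from genai.schema import BaseErrorResponse, TooManyRequestsResponse, UnauthorizedResponse

def describe_error(response: BaseErrorResponse) -> str:
    # Each subclass pins a distinct status code, so matching on `response.status_code` works as well.
    if isinstance(response, TooManyRequestsResponse):
        return "Rate limited - retry later"
    if isinstance(response, UnauthorizedResponse):
        return "Check your API key"
    return f"API error {response.status_code}: {response.message}"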
__all__ = [ "ApiKeyRegenerateCreateResponse", "ApiKeyResult", "ApiKeyRetrieveResponse", "BadRequestResponse", "BaseErrorExtension", "BaseErrorResponse", "BaseMessage", "BaseTokens", "ChatRole", "ConcurrencyLimit", "DecodingMethod", "DeploymentCreateResponse", "DeploymentIdRetrieveResponse", "DeploymentResult", "DeploymentRetrieveResponse", "DeploymentStatus", "EvaluationCreateResponse", "EvaluationExperiment", "EvaluationExperimentCreateResponse", "EvaluationExperimentIdRetrieveResponse", "EvaluationExperimentRetrieveResponse", "EvaluationExperimentSortBy", "EvaluationFieldOperation", "EvaluationFile", "EvaluationIdInstanceResultRetrieveResponse", "EvaluationIdRetrieveResponse", "EvaluationInstanceResult", "EvaluationLimit", "EvaluationLimitRetrieveResponse", "EvaluationOperationType", "EvaluationParentTask", "EvaluationPreviewCreateResponse", "EvaluationPrompt", "EvaluationResult", "EvaluationRetrieveResponse", "EvaluationSortBy", "EvaluationStatus", "EvaluationTask", "EvaluationTemplate", "EvaluationTemplateIdRetrieveResponse", "EvaluationTemplateRetrieveResponse", "Extensions", "Extensions1", "Extensions2", "Extensions3", "Extensions4", "Extensions5", "FileCreateResponse", "FileDescendant", "FileFormat", "FileIdPatchResponse", "FileIdRetrieveResponse", "FileListSortBy", "FileMetadata", "FileMetadataStats", "FilePurpose", "FileResult", "FileRetrieveResponse", "FolderCreateResponse", "FolderIdPatchResponse", "FolderIdRetrieveResponse", "FolderIdUpdateResponse", "FolderResult", "FolderRetrieveResponse", "GeneratedToken", "HAPOptions", "Input", "InternalServerErrorResponse", "LengthPenalty", "MessageFile", "Metric", "Metrics", "ModelFacet", "ModelFacetType", "ModelFamily", "ModelIdRetrieveResponse", "ModelIdRetrieveResult", "ModelRetrieveResponse", "ModelRetrieveResults", "ModelTokenLimits", "ModelType", "ModerationHAP", "ModerationHAPInput", "ModerationHAPOutput", "ModerationImplicitHate", "ModerationImplicitHateInput", "ModerationImplicitHateOutput", "ModerationParameters", "ModerationPosition", "ModerationSocialBias", "ModerationSocialBiasInput", "ModerationSocialBiasOutput", "ModerationStigma", "ModerationStigmaInput", "ModerationStigmaOutput", "ModerationTokens", "NotFoundResponse", "PromptCreateResponse", "PromptIdPatchResponse", "PromptIdRetrieveResponse", "PromptIdUpdateResponse", "PromptListSortBy", "PromptListSource", "PromptModerationParameters", "PromptResult", "PromptResultAuthor", "PromptResultTask", "PromptRetrieveResponse", "PromptTag", "PromptTagType", "PromptTemplateData", "PromptType", "RequestApiVersion", "RequestChatConversationIdRetrieveResponse", "RequestChatConversationIdRetrieveResults", "RequestChatConversationIdRetrieveResultsRequest", "RequestChatConversationIdRetrieveResultsResponse", "RequestEndpoint", "RequestFeedbackCategory", "RequestFeedbackResult", "RequestFeedbackVote", "RequestIdFeedbackCreateResponse", "RequestIdFeedbackRetrieveResponse", "RequestIdFeedbackUpdateResponse", "RequestOrigin", "RequestResultVersion", "RequestRetrieveResponse", "RequestRetrieveResults", "RequestStatus", "SocialBiasOptions", "SortDirection", "StopReason", "StorageProviderLocation", "SystemPrompt", "SystemPromptAuthor", "SystemPromptCreateResponse", "SystemPromptIdRetrieveResponse", "SystemPromptIdUpdateResponse", "SystemPromptRetrieveResponse", "SystemPromptType", "Tag", "TagRetrieveResponse", "TagType", "TaskRetrieveResponse", "Tasks", "TextChatCreateResponse", "TextChatGenerationStreamResult", "TextChatOutputCreateResponse", "TextChatStreamCreateResponse", 
"TextClassificationCreateData", "TextClassificationCreateResponse", "TextClassificationResult", "TextClassificationType", "TextCreateResponseModeration", "TextEmbeddingCreateResponse", "TextEmbeddingLimit", "TextEmbeddingLimitRetrieveResponse", "TextEmbeddingParameters", "TextGenerationComparisonCreateRequestRequest", "TextGenerationComparisonCreateResponse", "TextGenerationComparisonCreateResults", "TextGenerationComparisonCreateResultsParameters", "TextGenerationComparisonCreateResultsResult", "TextGenerationComparisonParameters", "TextGenerationCreateResponse", "TextGenerationFeedbackCategory", "TextGenerationFeedbackResult", "TextGenerationFeedbackVote", "TextGenerationIdFeedbackCreateResponse", "TextGenerationIdFeedbackRetrieveResponse", "TextGenerationIdFeedbackUpdateResponse", "TextGenerationLimit", "TextGenerationLimitRetrieveResponse", "TextGenerationOutputCreateResponse", "TextGenerationParameters", "TextGenerationResult", "TextGenerationReturnOptions", "TextGenerationStreamCreateResponse", "TextGenerationStreamResult", "TextModeration", "TextModerationCreateResponse", "TextRerankCreateResponse", "TextRerankParameters", "TextRerankResult", "TextRerankReturnOptions", "TextSentenceSimilarityCreateResponse", "TextSentenceSimilarityCreateResult", "TextSentenceSimilarityParameters", "TextTokenizationCreateResponse", "TextTokenizationCreateResults", "TextTokenizationParameters", "TextTokenizationReturnOptions", "TimeSerieForecastingCreateResponse", "TimeSerieLimitRetrieveResponse", "TimeSeriesLimit", "TooManyRequestsResponse", "TrimMethod", "TuneAssetType", "TuneCreateResponse", "TuneFromFileCreateResponse", "TuneIdPatchResponse", "TuneIdRetrieveResponse", "TuneListSortBy", "TuneParameters", "TunePreflightCreateResponse", "TuneResult", "TuneResultContent", "TuneResultDatapoint", "TuneResultDatapointLossData", "TuneResultDatapointValidationLoss", "TuneResultDatapointValidationLossData", "TuneResultFiles", "TuneRetrieveResponse", "TuneStatus", "TunesResultDatapointLoss", "TuningTypeRetrieveResponse", "TuningTypeRetrieveResults", "UnauthorizedResponse", "UnavailableResponse", "UserApiKey", "UserCreateResponse", "UserCreateResult", "UserGenerationDefault", "UserPatchResponse", "UserResult", "UserRetrieveResponse", ]