diff --git a/.openapi-generator-ignore b/.openapi-generator-ignore new file mode 100644 index 00000000..140a9113 --- /dev/null +++ b/.openapi-generator-ignore @@ -0,0 +1,19 @@ +# OpenAPI Generator Ignore +# Files listed here will not be overwritten by the generator + +# Custom wrapper code +launch/__init__.py +launch/client.py +launch/model_endpoint.py + +# Project files we maintain manually +README.md +.gitignore +requirements.txt + +# Type stub files (have syntax errors due to generator bug) +**/*.pyi + +# Generated docs (have invalid Python syntax in examples due to generator bug) +launch/api_client_README.md +launch/api_client/docs/** diff --git a/.openapi-generator/FILES b/.openapi-generator/FILES new file mode 100644 index 00000000..3b4c5ac1 --- /dev/null +++ b/.openapi-generator/FILES @@ -0,0 +1,456 @@ +launch/api_client/__init__.py +launch/api_client/api_client.py +launch/api_client/apis/__init__.py +launch/api_client/apis/tags/default_api.py +launch/api_client/configuration.py +launch/api_client/docs/apis/tags/DefaultApi.md +launch/api_client/docs/models/Annotation.md +launch/api_client/docs/models/Audio.md +launch/api_client/docs/models/Audio1.md +launch/api_client/docs/models/Audio2.md +launch/api_client/docs/models/BatchCompletionsJob.md +launch/api_client/docs/models/BatchCompletionsJobStatus.md +launch/api_client/docs/models/BatchCompletionsModelConfig.md +launch/api_client/docs/models/BatchJobSerializationFormat.md +launch/api_client/docs/models/BatchJobStatus.md +launch/api_client/docs/models/CallbackAuth.md +launch/api_client/docs/models/CallbackBasicAuth.md +launch/api_client/docs/models/CallbackmTLSAuth.md +launch/api_client/docs/models/CancelBatchCompletionsV2Response.md +launch/api_client/docs/models/CancelFineTuneResponse.md +launch/api_client/docs/models/ChatCompletionFunctionCallOption.md +launch/api_client/docs/models/ChatCompletionFunctions.md +launch/api_client/docs/models/ChatCompletionMessageToolCall.md 
+launch/api_client/docs/models/ChatCompletionMessageToolCallChunk.md +launch/api_client/docs/models/ChatCompletionMessageToolCallsInput.md +launch/api_client/docs/models/ChatCompletionMessageToolCallsOutput.md +launch/api_client/docs/models/ChatCompletionNamedToolChoice.md +launch/api_client/docs/models/ChatCompletionRequestAssistantMessage.md +launch/api_client/docs/models/ChatCompletionRequestAssistantMessageContentPart.md +launch/api_client/docs/models/ChatCompletionRequestDeveloperMessage.md +launch/api_client/docs/models/ChatCompletionRequestFunctionMessage.md +launch/api_client/docs/models/ChatCompletionRequestMessage.md +launch/api_client/docs/models/ChatCompletionRequestMessageContentPartAudio.md +launch/api_client/docs/models/ChatCompletionRequestMessageContentPartFile.md +launch/api_client/docs/models/ChatCompletionRequestMessageContentPartImage.md +launch/api_client/docs/models/ChatCompletionRequestMessageContentPartRefusal.md +launch/api_client/docs/models/ChatCompletionRequestMessageContentPartText.md +launch/api_client/docs/models/ChatCompletionRequestSystemMessage.md +launch/api_client/docs/models/ChatCompletionRequestSystemMessageContentPart.md +launch/api_client/docs/models/ChatCompletionRequestToolMessage.md +launch/api_client/docs/models/ChatCompletionRequestToolMessageContentPart.md +launch/api_client/docs/models/ChatCompletionRequestUserMessage.md +launch/api_client/docs/models/ChatCompletionRequestUserMessageContentPart.md +launch/api_client/docs/models/ChatCompletionResponseMessage.md +launch/api_client/docs/models/ChatCompletionStreamOptions.md +launch/api_client/docs/models/ChatCompletionStreamResponseDelta.md +launch/api_client/docs/models/ChatCompletionTokenLogprob.md +launch/api_client/docs/models/ChatCompletionTool.md +launch/api_client/docs/models/ChatCompletionToolChoiceOption.md +launch/api_client/docs/models/ChatCompletionV2Request.md +launch/api_client/docs/models/ChatCompletionV2StreamErrorChunk.md 
+launch/api_client/docs/models/Choice.md +launch/api_client/docs/models/Choice1.md +launch/api_client/docs/models/Choice2.md +launch/api_client/docs/models/CloneModelBundleV1Request.md +launch/api_client/docs/models/CloneModelBundleV2Request.md +launch/api_client/docs/models/CloudpickleArtifactFlavor.md +launch/api_client/docs/models/CompletionOutput.md +launch/api_client/docs/models/CompletionStreamOutput.md +launch/api_client/docs/models/CompletionStreamV1Request.md +launch/api_client/docs/models/CompletionStreamV1Response.md +launch/api_client/docs/models/CompletionSyncV1Request.md +launch/api_client/docs/models/CompletionSyncV1Response.md +launch/api_client/docs/models/CompletionTokensDetails.md +launch/api_client/docs/models/CompletionUsage.md +launch/api_client/docs/models/CompletionV2Request.md +launch/api_client/docs/models/CompletionV2StreamErrorChunk.md +launch/api_client/docs/models/Content.md +launch/api_client/docs/models/Content1.md +launch/api_client/docs/models/Content2.md +launch/api_client/docs/models/Content3.md +launch/api_client/docs/models/Content4.md +launch/api_client/docs/models/Content8.md +launch/api_client/docs/models/CreateAsyncTaskV1Response.md +launch/api_client/docs/models/CreateBatchCompletionsV1ModelConfig.md +launch/api_client/docs/models/CreateBatchCompletionsV1Request.md +launch/api_client/docs/models/CreateBatchCompletionsV1RequestContent.md +launch/api_client/docs/models/CreateBatchCompletionsV1Response.md +launch/api_client/docs/models/CreateBatchCompletionsV2Request.md +launch/api_client/docs/models/CreateBatchJobResourceRequests.md +launch/api_client/docs/models/CreateBatchJobV1Request.md +launch/api_client/docs/models/CreateBatchJobV1Response.md +launch/api_client/docs/models/CreateChatCompletionResponse.md +launch/api_client/docs/models/CreateChatCompletionStreamResponse.md +launch/api_client/docs/models/CreateCompletionResponse.md +launch/api_client/docs/models/CreateDeepSpeedModelEndpointRequest.md 
+launch/api_client/docs/models/CreateDockerImageBatchJobBundleV1Request.md +launch/api_client/docs/models/CreateDockerImageBatchJobBundleV1Response.md +launch/api_client/docs/models/CreateDockerImageBatchJobResourceRequests.md +launch/api_client/docs/models/CreateDockerImageBatchJobV1Request.md +launch/api_client/docs/models/CreateDockerImageBatchJobV1Response.md +launch/api_client/docs/models/CreateFineTuneRequest.md +launch/api_client/docs/models/CreateFineTuneResponse.md +launch/api_client/docs/models/CreateLLMModelEndpointV1Request.md +launch/api_client/docs/models/CreateLLMModelEndpointV1Response.md +launch/api_client/docs/models/CreateLightLLMModelEndpointRequest.md +launch/api_client/docs/models/CreateModelBundleV1Request.md +launch/api_client/docs/models/CreateModelBundleV1Response.md +launch/api_client/docs/models/CreateModelBundleV2Request.md +launch/api_client/docs/models/CreateModelBundleV2Response.md +launch/api_client/docs/models/CreateModelEndpointV1Request.md +launch/api_client/docs/models/CreateModelEndpointV1Response.md +launch/api_client/docs/models/CreateSGLangModelEndpointRequest.md +launch/api_client/docs/models/CreateTensorRTLLMModelEndpointRequest.md +launch/api_client/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md +launch/api_client/docs/models/CreateTriggerV1Request.md +launch/api_client/docs/models/CreateTriggerV1Response.md +launch/api_client/docs/models/CreateVLLMModelEndpointRequest.md +launch/api_client/docs/models/CustomFramework.md +launch/api_client/docs/models/DeleteFileResponse.md +launch/api_client/docs/models/DeleteLLMEndpointResponse.md +launch/api_client/docs/models/DeleteModelEndpointV1Response.md +launch/api_client/docs/models/DeleteTriggerV1Response.md +launch/api_client/docs/models/DockerImageBatchJob.md +launch/api_client/docs/models/DockerImageBatchJobBundleV1Response.md +launch/api_client/docs/models/EndpointPredictV1Request.md +launch/api_client/docs/models/File.md 
+launch/api_client/docs/models/FilteredChatCompletionV2Request.md +launch/api_client/docs/models/FilteredCompletionV2Request.md +launch/api_client/docs/models/Function1.md +launch/api_client/docs/models/Function2.md +launch/api_client/docs/models/Function3.md +launch/api_client/docs/models/FunctionCall.md +launch/api_client/docs/models/FunctionCall2.md +launch/api_client/docs/models/FunctionObject.md +launch/api_client/docs/models/FunctionParameters.md +launch/api_client/docs/models/GetAsyncTaskV1Response.md +launch/api_client/docs/models/GetBatchCompletionV2Response.md +launch/api_client/docs/models/GetBatchJobV1Response.md +launch/api_client/docs/models/GetDockerImageBatchJobV1Response.md +launch/api_client/docs/models/GetFileContentResponse.md +launch/api_client/docs/models/GetFileResponse.md +launch/api_client/docs/models/GetFineTuneEventsResponse.md +launch/api_client/docs/models/GetFineTuneResponse.md +launch/api_client/docs/models/GetLLMModelEndpointV1Response.md +launch/api_client/docs/models/GetModelEndpointV1Response.md +launch/api_client/docs/models/GetTriggerV1Response.md +launch/api_client/docs/models/GpuType.md +launch/api_client/docs/models/HTTPValidationError.md +launch/api_client/docs/models/ImageUrl.md +launch/api_client/docs/models/InputAudio.md +launch/api_client/docs/models/JsonSchema.md +launch/api_client/docs/models/LLMFineTuneEvent.md +launch/api_client/docs/models/LLMInferenceFramework.md +launch/api_client/docs/models/LLMSource.md +launch/api_client/docs/models/ListDockerImageBatchJobBundleV1Response.md +launch/api_client/docs/models/ListDockerImageBatchJobsV1Response.md +launch/api_client/docs/models/ListFilesResponse.md +launch/api_client/docs/models/ListFineTunesResponse.md +launch/api_client/docs/models/ListLLMModelEndpointsV1Response.md +launch/api_client/docs/models/ListModelBundlesV1Response.md +launch/api_client/docs/models/ListModelBundlesV2Response.md +launch/api_client/docs/models/ListModelEndpointsV1Response.md 
+launch/api_client/docs/models/ListTriggersV1Response.md +launch/api_client/docs/models/Logprobs.md +launch/api_client/docs/models/Logprobs2.md +launch/api_client/docs/models/Metadata.md +launch/api_client/docs/models/ModelBundleEnvironmentParams.md +launch/api_client/docs/models/ModelBundleFrameworkType.md +launch/api_client/docs/models/ModelBundleOrderBy.md +launch/api_client/docs/models/ModelBundlePackagingType.md +launch/api_client/docs/models/ModelBundleV1Response.md +launch/api_client/docs/models/ModelBundleV2Response.md +launch/api_client/docs/models/ModelDownloadRequest.md +launch/api_client/docs/models/ModelDownloadResponse.md +launch/api_client/docs/models/ModelEndpointDeploymentState.md +launch/api_client/docs/models/ModelEndpointOrderBy.md +launch/api_client/docs/models/ModelEndpointResourceState.md +launch/api_client/docs/models/ModelEndpointStatus.md +launch/api_client/docs/models/ModelEndpointType.md +launch/api_client/docs/models/ParallelToolCalls.md +launch/api_client/docs/models/PredictionContent.md +launch/api_client/docs/models/Prompt.md +launch/api_client/docs/models/Prompt1.md +launch/api_client/docs/models/Prompt1Item.md +launch/api_client/docs/models/PromptTokensDetails.md +launch/api_client/docs/models/PytorchFramework.md +launch/api_client/docs/models/Quantization.md +launch/api_client/docs/models/ReasoningEffort.md +launch/api_client/docs/models/RequestSchema.md +launch/api_client/docs/models/ResponseFormatJsonObject.md +launch/api_client/docs/models/ResponseFormatJsonSchema.md +launch/api_client/docs/models/ResponseFormatJsonSchemaSchema.md +launch/api_client/docs/models/ResponseFormatText.md +launch/api_client/docs/models/ResponseModalities.md +launch/api_client/docs/models/ResponseSchema.md +launch/api_client/docs/models/RestartModelEndpointV1Response.md +launch/api_client/docs/models/RunnableImageFlavor.md +launch/api_client/docs/models/ServiceTier.md +launch/api_client/docs/models/StopConfiguration.md 
+launch/api_client/docs/models/StopConfiguration1.md +launch/api_client/docs/models/StreamError.md +launch/api_client/docs/models/StreamErrorContent.md +launch/api_client/docs/models/StreamingEnhancedRunnableImageFlavor.md +launch/api_client/docs/models/SyncEndpointPredictV1Request.md +launch/api_client/docs/models/SyncEndpointPredictV1Response.md +launch/api_client/docs/models/TaskStatus.md +launch/api_client/docs/models/TensorflowFramework.md +launch/api_client/docs/models/TokenOutput.md +launch/api_client/docs/models/ToolConfig.md +launch/api_client/docs/models/TopLogprob.md +launch/api_client/docs/models/TritonEnhancedRunnableImageFlavor.md +launch/api_client/docs/models/UpdateBatchCompletionsV2Request.md +launch/api_client/docs/models/UpdateBatchCompletionsV2Response.md +launch/api_client/docs/models/UpdateBatchJobV1Request.md +launch/api_client/docs/models/UpdateBatchJobV1Response.md +launch/api_client/docs/models/UpdateDeepSpeedModelEndpointRequest.md +launch/api_client/docs/models/UpdateDockerImageBatchJobV1Request.md +launch/api_client/docs/models/UpdateDockerImageBatchJobV1Response.md +launch/api_client/docs/models/UpdateLLMModelEndpointV1Request.md +launch/api_client/docs/models/UpdateLLMModelEndpointV1Response.md +launch/api_client/docs/models/UpdateModelEndpointV1Request.md +launch/api_client/docs/models/UpdateModelEndpointV1Response.md +launch/api_client/docs/models/UpdateSGLangModelEndpointRequest.md +launch/api_client/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md +launch/api_client/docs/models/UpdateTriggerV1Request.md +launch/api_client/docs/models/UpdateTriggerV1Response.md +launch/api_client/docs/models/UpdateVLLMModelEndpointRequest.md +launch/api_client/docs/models/UploadFileResponse.md +launch/api_client/docs/models/UrlCitation.md +launch/api_client/docs/models/UserLocation.md +launch/api_client/docs/models/ValidationError.md +launch/api_client/docs/models/VoiceIdsShared.md 
+launch/api_client/docs/models/WebSearchContextSize.md +launch/api_client/docs/models/WebSearchLocation.md +launch/api_client/docs/models/WebSearchOptions.md +launch/api_client/docs/models/ZipArtifactFlavor.md +launch/api_client/exceptions.py +launch/api_client/model/__init__.py +launch/api_client/model/annotation.py +launch/api_client/model/audio.py +launch/api_client/model/audio1.py +launch/api_client/model/audio2.py +launch/api_client/model/batch_completions_job.py +launch/api_client/model/batch_completions_job_status.py +launch/api_client/model/batch_completions_model_config.py +launch/api_client/model/batch_job_serialization_format.py +launch/api_client/model/batch_job_status.py +launch/api_client/model/callback_auth.py +launch/api_client/model/callback_basic_auth.py +launch/api_client/model/callbackm_tls_auth.py +launch/api_client/model/cancel_batch_completions_v2_response.py +launch/api_client/model/cancel_fine_tune_response.py +launch/api_client/model/chat_completion_function_call_option.py +launch/api_client/model/chat_completion_functions.py +launch/api_client/model/chat_completion_message_tool_call.py +launch/api_client/model/chat_completion_message_tool_call_chunk.py +launch/api_client/model/chat_completion_message_tool_calls_input.py +launch/api_client/model/chat_completion_message_tool_calls_output.py +launch/api_client/model/chat_completion_named_tool_choice.py +launch/api_client/model/chat_completion_request_assistant_message.py +launch/api_client/model/chat_completion_request_assistant_message_content_part.py +launch/api_client/model/chat_completion_request_developer_message.py +launch/api_client/model/chat_completion_request_function_message.py +launch/api_client/model/chat_completion_request_message.py +launch/api_client/model/chat_completion_request_message_content_part_audio.py +launch/api_client/model/chat_completion_request_message_content_part_file.py +launch/api_client/model/chat_completion_request_message_content_part_image.py 
+launch/api_client/model/chat_completion_request_message_content_part_refusal.py +launch/api_client/model/chat_completion_request_message_content_part_text.py +launch/api_client/model/chat_completion_request_system_message.py +launch/api_client/model/chat_completion_request_system_message_content_part.py +launch/api_client/model/chat_completion_request_tool_message.py +launch/api_client/model/chat_completion_request_tool_message_content_part.py +launch/api_client/model/chat_completion_request_user_message.py +launch/api_client/model/chat_completion_request_user_message_content_part.py +launch/api_client/model/chat_completion_response_message.py +launch/api_client/model/chat_completion_stream_options.py +launch/api_client/model/chat_completion_stream_response_delta.py +launch/api_client/model/chat_completion_token_logprob.py +launch/api_client/model/chat_completion_tool.py +launch/api_client/model/chat_completion_tool_choice_option.py +launch/api_client/model/chat_completion_v2_request.py +launch/api_client/model/chat_completion_v2_stream_error_chunk.py +launch/api_client/model/choice.py +launch/api_client/model/choice1.py +launch/api_client/model/choice2.py +launch/api_client/model/clone_model_bundle_v1_request.py +launch/api_client/model/clone_model_bundle_v2_request.py +launch/api_client/model/cloudpickle_artifact_flavor.py +launch/api_client/model/completion_output.py +launch/api_client/model/completion_stream_output.py +launch/api_client/model/completion_stream_v1_request.py +launch/api_client/model/completion_stream_v1_response.py +launch/api_client/model/completion_sync_v1_request.py +launch/api_client/model/completion_sync_v1_response.py +launch/api_client/model/completion_tokens_details.py +launch/api_client/model/completion_usage.py +launch/api_client/model/completion_v2_request.py +launch/api_client/model/completion_v2_stream_error_chunk.py +launch/api_client/model/content.py +launch/api_client/model/content1.py +launch/api_client/model/content2.py 
+launch/api_client/model/content3.py +launch/api_client/model/content4.py +launch/api_client/model/content8.py +launch/api_client/model/create_async_task_v1_response.py +launch/api_client/model/create_batch_completions_v1_model_config.py +launch/api_client/model/create_batch_completions_v1_request.py +launch/api_client/model/create_batch_completions_v1_request_content.py +launch/api_client/model/create_batch_completions_v1_response.py +launch/api_client/model/create_batch_completions_v2_request.py +launch/api_client/model/create_batch_job_resource_requests.py +launch/api_client/model/create_batch_job_v1_request.py +launch/api_client/model/create_batch_job_v1_response.py +launch/api_client/model/create_chat_completion_response.py +launch/api_client/model/create_chat_completion_stream_response.py +launch/api_client/model/create_completion_response.py +launch/api_client/model/create_deep_speed_model_endpoint_request.py +launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py +launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py +launch/api_client/model/create_docker_image_batch_job_resource_requests.py +launch/api_client/model/create_docker_image_batch_job_v1_request.py +launch/api_client/model/create_docker_image_batch_job_v1_response.py +launch/api_client/model/create_fine_tune_request.py +launch/api_client/model/create_fine_tune_response.py +launch/api_client/model/create_light_llm_model_endpoint_request.py +launch/api_client/model/create_llm_model_endpoint_v1_request.py +launch/api_client/model/create_llm_model_endpoint_v1_response.py +launch/api_client/model/create_model_bundle_v1_request.py +launch/api_client/model/create_model_bundle_v1_response.py +launch/api_client/model/create_model_bundle_v2_request.py +launch/api_client/model/create_model_bundle_v2_response.py +launch/api_client/model/create_model_endpoint_v1_request.py +launch/api_client/model/create_model_endpoint_v1_response.py 
+launch/api_client/model/create_sg_lang_model_endpoint_request.py +launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py +launch/api_client/model/create_text_generation_inference_model_endpoint_request.py +launch/api_client/model/create_trigger_v1_request.py +launch/api_client/model/create_trigger_v1_response.py +launch/api_client/model/create_vllm_model_endpoint_request.py +launch/api_client/model/custom_framework.py +launch/api_client/model/delete_file_response.py +launch/api_client/model/delete_llm_endpoint_response.py +launch/api_client/model/delete_model_endpoint_v1_response.py +launch/api_client/model/delete_trigger_v1_response.py +launch/api_client/model/docker_image_batch_job.py +launch/api_client/model/docker_image_batch_job_bundle_v1_response.py +launch/api_client/model/endpoint_predict_v1_request.py +launch/api_client/model/file.py +launch/api_client/model/filtered_chat_completion_v2_request.py +launch/api_client/model/filtered_completion_v2_request.py +launch/api_client/model/function1.py +launch/api_client/model/function2.py +launch/api_client/model/function3.py +launch/api_client/model/function_call.py +launch/api_client/model/function_call2.py +launch/api_client/model/function_object.py +launch/api_client/model/function_parameters.py +launch/api_client/model/get_async_task_v1_response.py +launch/api_client/model/get_batch_completion_v2_response.py +launch/api_client/model/get_batch_job_v1_response.py +launch/api_client/model/get_docker_image_batch_job_v1_response.py +launch/api_client/model/get_file_content_response.py +launch/api_client/model/get_file_response.py +launch/api_client/model/get_fine_tune_events_response.py +launch/api_client/model/get_fine_tune_response.py +launch/api_client/model/get_llm_model_endpoint_v1_response.py +launch/api_client/model/get_model_endpoint_v1_response.py +launch/api_client/model/get_trigger_v1_response.py +launch/api_client/model/gpu_type.py +launch/api_client/model/http_validation_error.py 
+launch/api_client/model/image_url.py +launch/api_client/model/input_audio.py +launch/api_client/model/json_schema.py +launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py +launch/api_client/model/list_docker_image_batch_jobs_v1_response.py +launch/api_client/model/list_files_response.py +launch/api_client/model/list_fine_tunes_response.py +launch/api_client/model/list_llm_model_endpoints_v1_response.py +launch/api_client/model/list_model_bundles_v1_response.py +launch/api_client/model/list_model_bundles_v2_response.py +launch/api_client/model/list_model_endpoints_v1_response.py +launch/api_client/model/list_triggers_v1_response.py +launch/api_client/model/llm_fine_tune_event.py +launch/api_client/model/llm_inference_framework.py +launch/api_client/model/llm_source.py +launch/api_client/model/logprobs.py +launch/api_client/model/logprobs2.py +launch/api_client/model/metadata.py +launch/api_client/model/model_bundle_environment_params.py +launch/api_client/model/model_bundle_framework_type.py +launch/api_client/model/model_bundle_order_by.py +launch/api_client/model/model_bundle_packaging_type.py +launch/api_client/model/model_bundle_v1_response.py +launch/api_client/model/model_bundle_v2_response.py +launch/api_client/model/model_download_request.py +launch/api_client/model/model_download_response.py +launch/api_client/model/model_endpoint_deployment_state.py +launch/api_client/model/model_endpoint_order_by.py +launch/api_client/model/model_endpoint_resource_state.py +launch/api_client/model/model_endpoint_status.py +launch/api_client/model/model_endpoint_type.py +launch/api_client/model/parallel_tool_calls.py +launch/api_client/model/prediction_content.py +launch/api_client/model/prompt.py +launch/api_client/model/prompt1.py +launch/api_client/model/prompt1_item.py +launch/api_client/model/prompt_tokens_details.py +launch/api_client/model/pytorch_framework.py +launch/api_client/model/quantization.py +launch/api_client/model/reasoning_effort.py 
+launch/api_client/model/request_schema.py +launch/api_client/model/response_format_json_object.py +launch/api_client/model/response_format_json_schema.py +launch/api_client/model/response_format_json_schema_schema.py +launch/api_client/model/response_format_text.py +launch/api_client/model/response_modalities.py +launch/api_client/model/response_schema.py +launch/api_client/model/restart_model_endpoint_v1_response.py +launch/api_client/model/runnable_image_flavor.py +launch/api_client/model/service_tier.py +launch/api_client/model/stop_configuration.py +launch/api_client/model/stop_configuration1.py +launch/api_client/model/stream_error.py +launch/api_client/model/stream_error_content.py +launch/api_client/model/streaming_enhanced_runnable_image_flavor.py +launch/api_client/model/sync_endpoint_predict_v1_request.py +launch/api_client/model/sync_endpoint_predict_v1_response.py +launch/api_client/model/task_status.py +launch/api_client/model/tensorflow_framework.py +launch/api_client/model/token_output.py +launch/api_client/model/tool_config.py +launch/api_client/model/top_logprob.py +launch/api_client/model/triton_enhanced_runnable_image_flavor.py +launch/api_client/model/update_batch_completions_v2_request.py +launch/api_client/model/update_batch_completions_v2_response.py +launch/api_client/model/update_batch_job_v1_request.py +launch/api_client/model/update_batch_job_v1_response.py +launch/api_client/model/update_deep_speed_model_endpoint_request.py +launch/api_client/model/update_docker_image_batch_job_v1_request.py +launch/api_client/model/update_docker_image_batch_job_v1_response.py +launch/api_client/model/update_llm_model_endpoint_v1_request.py +launch/api_client/model/update_llm_model_endpoint_v1_response.py +launch/api_client/model/update_model_endpoint_v1_request.py +launch/api_client/model/update_model_endpoint_v1_response.py +launch/api_client/model/update_sg_lang_model_endpoint_request.py 
+launch/api_client/model/update_text_generation_inference_model_endpoint_request.py +launch/api_client/model/update_trigger_v1_request.py +launch/api_client/model/update_trigger_v1_response.py +launch/api_client/model/update_vllm_model_endpoint_request.py +launch/api_client/model/upload_file_response.py +launch/api_client/model/url_citation.py +launch/api_client/model/user_location.py +launch/api_client/model/validation_error.py +launch/api_client/model/voice_ids_shared.py +launch/api_client/model/web_search_context_size.py +launch/api_client/model/web_search_location.py +launch/api_client/model/web_search_options.py +launch/api_client/model/zip_artifact_flavor.py +launch/api_client/models/__init__.py +launch/api_client/rest.py +launch/api_client/schemas.py +launch/api_client/test/__init__.py +launch/api_client/test/test_models/__init__.py +launch/api_client_README.md diff --git a/.openapi-generator/VERSION b/.openapi-generator/VERSION new file mode 100644 index 00000000..c0be8a79 --- /dev/null +++ b/.openapi-generator/VERSION @@ -0,0 +1 @@ +6.4.0 \ No newline at end of file diff --git a/docs/models/Annotation.md b/docs/models/Annotation.md new file mode 100644 index 00000000..38d0a336 --- /dev/null +++ b/docs/models/Annotation.md @@ -0,0 +1,16 @@ +# launch.api_client.model.annotation.Annotation + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the URL citation. Always `url_citation`. | must be one of ["url_citation", ] +**url_citation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A URL citation when using web search. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Audio.md b/docs/models/Audio.md new file mode 100644 index 00000000..f44481a8 --- /dev/null +++ b/docs/models/Audio.md @@ -0,0 +1,15 @@ +# launch.api_client.model.audio.Audio + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, 
FileIO | Unique identifier for a previous audio response from the model. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Audio1.md b/docs/models/Audio1.md new file mode 100644 index 00000000..6d81db0a --- /dev/null +++ b/docs/models/Audio1.md @@ -0,0 +1,18 @@ +# launch.api_client.model.audio1.Audio1 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**expires_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations. 
| +**transcript** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Transcript of the audio generated by the model. | +**data** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Base64 encoded audio bytes generated by the model, in the format specified in the request. | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Unique identifier for this audio response. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Audio2.md b/docs/models/Audio2.md new file mode 100644 index 00000000..8abc7b01 --- /dev/null +++ b/docs/models/Audio2.md @@ -0,0 +1,16 @@ +# launch.api_client.model.audio2.Audio2 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**voice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The voice the model uses to respond. Supported voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`. | +**format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`. | must be one of ["wav", "aac", "mp3", "flac", "opus", "pcm16", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/BatchCompletionsJob.md b/docs/models/BatchCompletionsJob.md new file mode 100644 index 00000000..4f064b93 --- /dev/null +++ b/docs/models/BatchCompletionsJob.md @@ -0,0 +1,24 @@ +# launch.api_client.model.batch_completions_job.BatchCompletionsJob + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**completed_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**expires_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model configuration for the batch inference. Hardware configurations are inferred. 
| +**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | +**status** | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | | +**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] +**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/BatchCompletionsJobStatus.md b/docs/models/BatchCompletionsJobStatus.md new file mode 100644 index 00000000..ea4c16e8 --- /dev/null +++ b/docs/models/BatchCompletionsJobStatus.md @@ -0,0 +1,9 @@ +# launch.api_client.model.batch_completions_job_status.BatchCompletionsJobStatus + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/BatchCompletionsModelConfig.md b/docs/models/BatchCompletionsModelConfig.md new file mode 100644 index 00000000..aae7b481 --- /dev/null +++ b/docs/models/BatchCompletionsModelConfig.md @@ -0,0 +1,55 @@ +# launch.api_client.model.batch_completions_model_config.BatchCompletionsModelConfig + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. | +**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length. If unspecified, will be automatically derived from the model config | [optional] +**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] +**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility | [optional] +**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging Face Hub. 
This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false +**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. | [optional] +**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] +**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. | [optional] +**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. 
| [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] +**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] +**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. * \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. 
* \"bitsandbytes\" will load the weights using bitsandbytes quantization. | [optional] +**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] +**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer if available, 'slow' will always use the slow tokenizer, and 'mistral' will always use the tokenizer from `mistral_common`. | [optional] +**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. | [optional] +**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. 
| [optional] +**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for the model. | [optional] +**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] +**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. | [optional] +**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] +**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] +**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] +**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. | [optional] +**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] +**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. 
| [optional] +**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] +**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] +**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). | [optional] +**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] +**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. 
| [optional] +**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the checkpoint to load the model from. | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Suggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config. System may decide to use a different number than the given value. | [optional] if omitted the server will use the default value of 1 +**max_context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len. | [optional] +**response_role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Role of the response in the conversation. Only supported in chat completions. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/BatchJobSerializationFormat.md b/docs/models/BatchJobSerializationFormat.md new file mode 100644 index 00000000..9f25654c --- /dev/null +++ b/docs/models/BatchJobSerializationFormat.md @@ -0,0 +1,9 @@ +# launch.api_client.model.batch_job_serialization_format.BatchJobSerializationFormat + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/BatchJobStatus.md b/docs/models/BatchJobStatus.md new file mode 100644 index 00000000..aa250ada --- /dev/null +++ b/docs/models/BatchJobStatus.md @@ -0,0 +1,9 @@ +# launch.api_client.model.batch_job_status.BatchJobStatus + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, 
tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/BodyUploadFileV1FilesPost.md b/docs/models/BodyUploadFileV1FilesPost.md new file mode 100644 index 00000000..f6bcac2b --- /dev/null +++ b/docs/models/BodyUploadFileV1FilesPost.md @@ -0,0 +1,15 @@ +# launch.api_client.model.body_upload_file_v1_files_post.BodyUploadFileV1FilesPost + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**file** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CallbackAuth.md b/docs/models/CallbackAuth.md new file mode 100644 index 00000000..573a9437 --- /dev/null +++ b/docs/models/CallbackAuth.md @@ -0,0 +1,9 @@ +# 
launch.api_client.model.callback_auth.CallbackAuth + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CallbackBasicAuth.md b/docs/models/CallbackBasicAuth.md new file mode 100644 index 00000000..01b70697 --- /dev/null +++ b/docs/models/CallbackBasicAuth.md @@ -0,0 +1,17 @@ +# launch.api_client.model.callback_basic_auth.CallbackBasicAuth + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**password** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**kind** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["basic", ] +**username** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CallbackmTLSAuth.md b/docs/models/CallbackmTLSAuth.md new file mode 100644 index 00000000..35531dd1 --- /dev/null +++ b/docs/models/CallbackmTLSAuth.md @@ -0,0 +1,17 @@ +# launch.api_client.model.callbackm_tls_auth.CallbackmTLSAuth + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**kind** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["mtls", ] +**cert** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**key** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CancelBatchCompletionsV2Response.md b/docs/models/CancelBatchCompletionsV2Response.md new file mode 100644 index 00000000..2a15119e --- /dev/null +++ b/docs/models/CancelBatchCompletionsV2Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.cancel_batch_completions_v2_response.CancelBatchCompletionsV2Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether the cancellation 
was successful | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CancelFineTuneResponse.md b/docs/models/CancelFineTuneResponse.md new file mode 100644 index 00000000..f992928a --- /dev/null +++ b/docs/models/CancelFineTuneResponse.md @@ -0,0 +1,15 @@ +# launch.api_client.model.cancel_fine_tune_response.CancelFineTuneResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model 
list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionFunctionCallOption.md b/docs/models/ChatCompletionFunctionCallOption.md new file mode 100644 index 00000000..cd4771cd --- /dev/null +++ b/docs/models/ChatCompletionFunctionCallOption.md @@ -0,0 +1,15 @@ +# launch.api_client.model.chat_completion_function_call_option.ChatCompletionFunctionCallOption + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. 
| +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionFunctions.md b/docs/models/ChatCompletionFunctions.md new file mode 100644 index 00000000..6bcf2f67 --- /dev/null +++ b/docs/models/ChatCompletionFunctions.md @@ -0,0 +1,17 @@ +# launch.api_client.model.chat_completion_functions.ChatCompletionFunctions + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
| +**description** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A description of what the function does, used by the model to choose when and how to call the function. | [optional] +**parameters** | [**FunctionParameters**](FunctionParameters.md) | [**FunctionParameters**](FunctionParameters.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionMessageToolCall.md b/docs/models/ChatCompletionMessageToolCall.md new file mode 100644 index 00000000..11d26184 --- /dev/null +++ b/docs/models/ChatCompletionMessageToolCall.md @@ -0,0 +1,17 @@ +# launch.api_client.model.chat_completion_message_tool_call.ChatCompletionMessageToolCall + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**function** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The function that the model called. | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The ID of the tool call. | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. | must be one of ["function", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionMessageToolCallChunk.md b/docs/models/ChatCompletionMessageToolCallChunk.md new file mode 100644 index 00000000..a9da0672 --- /dev/null +++ b/docs/models/ChatCompletionMessageToolCallChunk.md @@ -0,0 +1,18 @@ +# launch.api_client.model.chat_completion_message_tool_call_chunk.ChatCompletionMessageToolCallChunk + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The ID of the tool call. | [optional] +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. 
| [optional] must be one of ["function", ] +**function** | [**Function2**](Function2.md) | [**Function2**](Function2.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionMessageToolCallsInput.md b/docs/models/ChatCompletionMessageToolCallsInput.md new file mode 100644 index 00000000..7baf96ff --- /dev/null +++ b/docs/models/ChatCompletionMessageToolCallsInput.md @@ -0,0 +1,11 @@ +# launch.api_client.model.chat_completion_message_tool_calls_input.ChatCompletionMessageToolCallsInput + +The tool calls generated by the model, such as function calls. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. 
| + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionMessageToolCallsOutput.md b/docs/models/ChatCompletionMessageToolCallsOutput.md new file mode 100644 index 00000000..c2232b77 --- /dev/null +++ b/docs/models/ChatCompletionMessageToolCallsOutput.md @@ -0,0 +1,11 @@ +# launch.api_client.model.chat_completion_message_tool_calls_output.ChatCompletionMessageToolCallsOutput + +The tool calls generated by the model, such as function calls. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. 
| + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionNamedToolChoice.md b/docs/models/ChatCompletionNamedToolChoice.md new file mode 100644 index 00000000..dba91cc9 --- /dev/null +++ b/docs/models/ChatCompletionNamedToolChoice.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_named_tool_choice.ChatCompletionNamedToolChoice + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**function** | [**Function3**](Function3.md) | [**Function3**](Function3.md) | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. 
| must be one of ["function", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestAssistantMessage.md b/docs/models/ChatCompletionRequestAssistantMessage.md new file mode 100644 index 00000000..9b40c74a --- /dev/null +++ b/docs/models/ChatCompletionRequestAssistantMessage.md @@ -0,0 +1,21 @@ +# launch.api_client.model.chat_completion_request_assistant_message.ChatCompletionRequestAssistantMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `assistant`. 
| must be one of ["assistant", ] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. | [optional] +**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message by the assistant. | [optional] +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. | [optional] +**audio** | [**Audio**](Audio.md) | [**Audio**](Audio.md) | Data about a previous audio response from the model. [Learn more](/docs/guides/audio). | [optional] +**tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. | [optional] +**function_call** | [**FunctionCall**](FunctionCall.md) | [**FunctionCall**](FunctionCall.md) | Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestAssistantMessageContentPart.md b/docs/models/ChatCompletionRequestAssistantMessageContentPart.md new file mode 100644 index 00000000..9981a8b3 --- /dev/null +++ b/docs/models/ChatCompletionRequestAssistantMessageContentPart.md @@ -0,0 +1,9 @@ +# launch.api_client.model.chat_completion_request_assistant_message_content_part.ChatCompletionRequestAssistantMessageContentPart + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestDeveloperMessage.md b/docs/models/ChatCompletionRequestDeveloperMessage.md new file mode 100644 index 00000000..3622a78d --- /dev/null +++ b/docs/models/ChatCompletionRequestDeveloperMessage.md @@ -0,0 +1,17 @@ +# launch.api_client.model.chat_completion_request_developer_message.ChatCompletionRequestDeveloperMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- 
+dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `developer`. | must be one of ["developer", ] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the developer message. | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestFunctionMessage.md b/docs/models/ChatCompletionRequestFunctionMessage.md new file mode 100644 index 00000000..14304683 --- /dev/null +++ b/docs/models/ChatCompletionRequestFunctionMessage.md @@ -0,0 +1,17 @@ +# launch.api_client.model.chat_completion_request_function_message.ChatCompletionRequestFunctionMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `function`. 
| must be one of ["function", ] +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the function message. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestMessage.md b/docs/models/ChatCompletionRequestMessage.md new file mode 100644 index 00000000..3fc709f0 --- /dev/null +++ b/docs/models/ChatCompletionRequestMessage.md @@ -0,0 +1,9 @@ +# launch.api_client.model.chat_completion_request_message.ChatCompletionRequestMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestMessageContentPartAudio.md b/docs/models/ChatCompletionRequestMessageContentPartAudio.md new file mode 100644 index 00000000..b91cfa31 --- /dev/null +++ b/docs/models/ChatCompletionRequestMessageContentPartAudio.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_request_message_content_part_audio.ChatCompletionRequestMessageContentPartAudio + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**input_audio** | [**InputAudio**](InputAudio.md) | [**InputAudio**](InputAudio.md) | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. Always `input_audio`. 
| must be one of ["input_audio", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestMessageContentPartFile.md b/docs/models/ChatCompletionRequestMessageContentPartFile.md new file mode 100644 index 00000000..3580290a --- /dev/null +++ b/docs/models/ChatCompletionRequestMessageContentPartFile.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_request_message_content_part_file.ChatCompletionRequestMessageContentPartFile + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**file** | [**File**](File.md) | [**File**](File.md) | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. Always `file`. 
| must be one of ["file", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestMessageContentPartImage.md b/docs/models/ChatCompletionRequestMessageContentPartImage.md new file mode 100644 index 00000000..0efc771b --- /dev/null +++ b/docs/models/ChatCompletionRequestMessageContentPartImage.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_request_message_content_part_image.ChatCompletionRequestMessageContentPartImage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**image_url** | [**ImageUrl**](ImageUrl.md) | [**ImageUrl**](ImageUrl.md) | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. 
| must be one of ["image_url", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestMessageContentPartRefusal.md b/docs/models/ChatCompletionRequestMessageContentPartRefusal.md new file mode 100644 index 00000000..cda03882 --- /dev/null +++ b/docs/models/ChatCompletionRequestMessageContentPartRefusal.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_request_message_content_part_refusal.ChatCompletionRequestMessageContentPartRefusal + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message generated by the model. 
| +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. | must be one of ["refusal", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestMessageContentPartText.md b/docs/models/ChatCompletionRequestMessageContentPartText.md new file mode 100644 index 00000000..f973e48b --- /dev/null +++ b/docs/models/ChatCompletionRequestMessageContentPartText.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_request_message_content_part_text.ChatCompletionRequestMessageContentPartText + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, 
tuple, bytes, FileIO | The text content. | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the content part. | must be one of ["text", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestSystemMessage.md b/docs/models/ChatCompletionRequestSystemMessage.md new file mode 100644 index 00000000..89df8eb9 --- /dev/null +++ b/docs/models/ChatCompletionRequestSystemMessage.md @@ -0,0 +1,17 @@ +# launch.api_client.model.chat_completion_request_system_message.ChatCompletionRequestSystemMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, 
bytes, FileIO | The role of the messages author, in this case `system`. | must be one of ["system", ] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the system message. | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestSystemMessageContentPart.md b/docs/models/ChatCompletionRequestSystemMessageContentPart.md new file mode 100644 index 00000000..d28d1965 --- /dev/null +++ b/docs/models/ChatCompletionRequestSystemMessageContentPart.md @@ -0,0 +1,9 @@ +# launch.api_client.model.chat_completion_request_system_message_content_part.ChatCompletionRequestSystemMessageContentPart + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestToolMessage.md b/docs/models/ChatCompletionRequestToolMessage.md new file mode 100644 index 00000000..0ae3cadc --- /dev/null +++ b/docs/models/ChatCompletionRequestToolMessage.md @@ -0,0 +1,17 @@ +# launch.api_client.model.chat_completion_request_tool_message.ChatCompletionRequestToolMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `tool`. | must be one of ["tool", ] +**tool_call_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call that this message is responding to. 
| +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the tool message. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestToolMessageContentPart.md b/docs/models/ChatCompletionRequestToolMessageContentPart.md new file mode 100644 index 00000000..1f254fac --- /dev/null +++ b/docs/models/ChatCompletionRequestToolMessageContentPart.md @@ -0,0 +1,9 @@ +# launch.api_client.model.chat_completion_request_tool_message_content_part.ChatCompletionRequestToolMessageContentPart + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestUserMessage.md b/docs/models/ChatCompletionRequestUserMessage.md new file mode 100644 index 00000000..e48395ec --- /dev/null +++ b/docs/models/ChatCompletionRequestUserMessage.md @@ -0,0 +1,17 
@@ +# launch.api_client.model.chat_completion_request_user_message.ChatCompletionRequestUserMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the messages author, in this case `user`. | must be one of ["user", ] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the user message. | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An optional name for the participant. Provides the model information to differentiate between participants of the same role. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionRequestUserMessageContentPart.md b/docs/models/ChatCompletionRequestUserMessageContentPart.md new file mode 100644 index 00000000..ec541402 --- /dev/null +++ b/docs/models/ChatCompletionRequestUserMessageContentPart.md @@ -0,0 +1,9 @@ +# launch.api_client.model.chat_completion_request_user_message_content_part.ChatCompletionRequestUserMessageContentPart + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionResponseMessage.md b/docs/models/ChatCompletionResponseMessage.md new file mode 100644 index 00000000..515ccbeb --- /dev/null +++ b/docs/models/ChatCompletionResponseMessage.md @@ -0,0 +1,21 @@ +# launch.api_client.model.chat_completion_response_message.ChatCompletionResponseMessage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the author of this message. | must be one of ["assistant", ] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the message. | [optional] +**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message generated by the model. | [optional] +**tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tool calls generated by the model, such as function calls. 
| [optional] +**annotations** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Annotations for the message, when applicable, as when using the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] +**function_call** | [**FunctionCall**](FunctionCall.md) | [**FunctionCall**](FunctionCall.md) | Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. | [optional] +**audio** | [**Audio1**](Audio1.md) | [**Audio1**](Audio1.md) | If the audio output modality is requested, this object contains data about the audio response from the model. [Learn more](/docs/guides/audio). | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionStreamOptions.md b/docs/models/ChatCompletionStreamOptions.md new file mode 100644 index 00000000..97279a28 --- /dev/null +++ b/docs/models/ChatCompletionStreamOptions.md @@ -0,0 +1,15 @@ +# launch.api_client.model.chat_completion_stream_options.ChatCompletionStreamOptions + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**include_usage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. **NOTE:** If the stream is interrupted, you may not receive the final usage chunk which contains the total token usage for the request. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionStreamResponseDelta.md b/docs/models/ChatCompletionStreamResponseDelta.md new file mode 100644 index 00000000..c36d2c02 --- /dev/null +++ b/docs/models/ChatCompletionStreamResponseDelta.md @@ -0,0 +1,19 @@ +# launch.api_client.model.chat_completion_stream_response_delta.ChatCompletionStreamResponseDelta + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The contents of the chunk message. | [optional] +**function_call** | [**FunctionCall2**](FunctionCall2.md) | [**FunctionCall2**](FunctionCall2.md) | Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. | [optional] +**tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The role of the author of this message. | [optional] must be one of ["developer", "system", "user", "assistant", "tool", ] +**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The refusal message generated by the model. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionTokenLogprob.md b/docs/models/ChatCompletionTokenLogprob.md new file mode 100644 index 00000000..edb7ebf3 --- /dev/null +++ b/docs/models/ChatCompletionTokenLogprob.md @@ -0,0 +1,18 @@ +# launch.api_client.model.chat_completion_token_logprob.ChatCompletionTokenLogprob + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. 
| +**logprob** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. | +**bytes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. | +**token** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The token. 
| +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionTool.md b/docs/models/ChatCompletionTool.md new file mode 100644 index 00000000..4e3d657a --- /dev/null +++ b/docs/models/ChatCompletionTool.md @@ -0,0 +1,16 @@ +# launch.api_client.model.chat_completion_tool.ChatCompletionTool + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**function** | [**FunctionObject**](FunctionObject.md) | [**FunctionObject**](FunctionObject.md) | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the tool. Currently, only `function` is supported. 
| must be one of ["function", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionToolChoiceOption.md b/docs/models/ChatCompletionToolChoiceOption.md new file mode 100644 index 00000000..4f1d2f7d --- /dev/null +++ b/docs/models/ChatCompletionToolChoiceOption.md @@ -0,0 +1,11 @@ +# launch.api_client.model.chat_completion_tool_choice_option.ChatCompletionToolChoiceOption + +Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. 
`required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionV2Request.md b/docs/models/ChatCompletionV2Request.md new file mode 100644 index 00000000..79ae9210 --- /dev/null +++ b/docs/models/ChatCompletionV2Request.md @@ -0,0 +1,72 @@ +# launch.api_client.model.chat_completion_v2_request.ChatCompletionV2Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**messages** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of messages comprising the conversation so far. Depending on the [model](/docs/models) you use, different message types (modalities) are supported, like [text](/docs/guides/text-generation), [images](/docs/guides/vision), and [audio](/docs/guides/audio). 
| +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. | +**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of output sequences that are generated from the prompt. From these `best_of` sequences, the top `n` sequences are returned. `best_of` must be greater than or equal to `n`. This is treated as the beam width when `use_beam_search` is True. By default, `best_of` is set to `n`. | [optional] +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] +**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] +**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. 
| [optional] +**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. Used in beam search. | [optional] +**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] +**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] +**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens.
| [optional] +**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. Defaults to False. | [optional] +**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. | [optional] +**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true +**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. 
| [optional] if omitted the server will use the default value of true +**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the new message will be prepended with the last message if they belong to the same role. | [optional] +**add_generation_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model. | [optional] +**continue_final_message** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. | [optional] +**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, special tokens (e.g. BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default). 
| [optional] +**documents** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys. | [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given | [optional] +**chat_template_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Additional kwargs to pass to the template renderer. Will be accessible by the chat template. | [optional] +**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. 
| [optional] +**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. | [optional] +**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. | [optional] +**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] +**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] +**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. 
| [optional] +**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling. | [optional] +**metadata** | [**Metadata**](Metadata.md) | [**Metadata**](Metadata.md) | | [optional] +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] if omitted the server will use the default value of 1 +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
| [optional] if omitted the server will use the default value of 1 +**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] +**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] +**modalities** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | [optional] +**reasoning_effort** | [**ReasoningEffort**](ReasoningEffort.md) | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] +**max_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). 
| [optional] +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. | [optional] if omitted the server will use the default value of 0 +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | [optional] if omitted the server will use the default value of 0 +**web_search_options** | [**WebSearchOptions**](WebSearchOptions.md) | [**WebSearchOptions**](WebSearchOptions.md) | This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] +**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. 
| [optional] +**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An object specifying the format that the model must output. Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. | [optional] +**audio** | [**Audio2**](Audio2.md) | [**Audio2**](Audio2.md) | Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](/docs/guides/audio). | [optional] +**store** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether or not to store the output of this chat completion request for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products. | [optional] if omitted the server will use the default value of false +**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, partial message deltas will be sent. 
Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). | [optional] if omitted the server will use the default value of false +**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. | [optional] +**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | [optional] +**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of each output token returned in the `content` of `message`. | [optional] if omitted the server will use the default value of false +**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o-series models](/docs/guides/reasoning). | [optional] +**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. | [optional] if omitted the server will use the default value of 1 +**prediction** | [**PredictionContent**](PredictionContent.md) | [**PredictionContent**](PredictionContent.md) | Configuration for a [Predicted Output](/docs/guides/predicted-outputs), which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. | [optional] +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This feature is in Beta. 
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] +**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] +**tools** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. | [optional] +**tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. 
| [optional] +**parallel_tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | [optional] +**function_call** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. | [optional] +**functions** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ChatCompletionV2StreamErrorChunk.md b/docs/models/ChatCompletionV2StreamErrorChunk.md new file mode 100644 index 00000000..339fe97f --- /dev/null +++ b/docs/models/ChatCompletionV2StreamErrorChunk.md @@ -0,0 +1,15 @@ +# launch.api_client.model.chat_completion_v2_stream_error_chunk.ChatCompletionV2StreamErrorChunk + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**error** | [**StreamError**](StreamError.md) | [**StreamError**](StreamError.md) | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + 
diff --git a/docs/models/Choice.md b/docs/models/Choice.md new file mode 100644 index 00000000..f6e21930 --- /dev/null +++ b/docs/models/Choice.md @@ -0,0 +1,18 @@ +# launch.api_client.model.choice.Choice + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**finish_reason** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. | must be one of ["stop", "length", "tool_calls", "content_filter", "function_call", ] +**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the choice in the list of choices. 
| +**message** | [**ChatCompletionResponseMessage**](ChatCompletionResponseMessage.md) | [**ChatCompletionResponseMessage**](ChatCompletionResponseMessage.md) | | +**logprobs** | [**Logprobs**](Logprobs.md) | [**Logprobs**](Logprobs.md) | Log probability information for the choice. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Choice1.md b/docs/models/Choice1.md new file mode 100644 index 00000000..74991b30 --- /dev/null +++ b/docs/models/Choice1.md @@ -0,0 +1,18 @@ +# launch.api_client.model.choice1.Choice1 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**finish_reason** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. | must be one of ["stop", "length", "tool_calls", "content_filter", "function_call", ] +**delta** | [**ChatCompletionStreamResponseDelta**](ChatCompletionStreamResponseDelta.md) | [**ChatCompletionStreamResponseDelta**](ChatCompletionStreamResponseDelta.md) | | +**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the choice in the list of choices. | +**logprobs** | [**Logprobs**](Logprobs.md) | [**Logprobs**](Logprobs.md) | Log probability information for the choice. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Choice2.md b/docs/models/Choice2.md new file mode 100644 index 00000000..36306adf --- /dev/null +++ b/docs/models/Choice2.md @@ -0,0 +1,18 @@ +# launch.api_client.model.choice2.Choice2 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**finish_reason** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. 
| must be one of ["stop", "length", "content_filter", ] +**index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**logprobs** | [**Logprobs2**](Logprobs2.md) | [**Logprobs2**](Logprobs2.md) | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CloneModelBundleV1Request.md b/docs/models/CloneModelBundleV1Request.md new file mode 100644 index 00000000..5ca434da --- /dev/null +++ b/docs/models/CloneModelBundleV1Request.md @@ -0,0 +1,18 @@ +# launch.api_client.model.clone_model_bundle_v1_request.CloneModelBundleV1Request + +Request object for cloning a Model Bundle from another one. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for cloning a Model Bundle from another one. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**original_model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**new_app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CloneModelBundleV2Request.md b/docs/models/CloneModelBundleV2Request.md new file mode 100644 index 00000000..3b09c946 --- /dev/null +++ b/docs/models/CloneModelBundleV2Request.md @@ -0,0 +1,18 @@ +# launch.api_client.model.clone_model_bundle_v2_request.CloneModelBundleV2Request + +Request object for cloning a Model Bundle from another one. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for cloning a Model Bundle from another one. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**original_model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**new_app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CloudpickleArtifactFlavor.md b/docs/models/CloudpickleArtifactFlavor.md new file mode 100644 index 00000000..aee1338a --- /dev/null +++ b/docs/models/CloudpickleArtifactFlavor.md @@ -0,0 +1,23 @@ +# launch.api_client.model.cloudpickle_artifact_flavor.CloudpickleArtifactFlavor + +This is the entity-layer 
class for the Model Bundle flavor of a cloudpickle artifact. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["cloudpickle_artifact", ] +**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**load_model_fn** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | +**load_predict_fn** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionOutput.md b/docs/models/CompletionOutput.md new file mode 100644 index 00000000..a384f964 --- /dev/null +++ b/docs/models/CompletionOutput.md @@ -0,0 +1,20 @@ +# launch.api_client.model.completion_output.CompletionOutput + +Represents the output of a completion request to a model. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Represents the output of a completion request to a model. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**num_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**num_prompt_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionStreamOutput.md b/docs/models/CompletionStreamOutput.md new file mode 100644 index 00000000..47db1595 --- /dev/null +++ b/docs/models/CompletionStreamOutput.md @@ -0,0 +1,19 @@ +# 
launch.api_client.model.completion_stream_output.CompletionStreamOutput + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**finished** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**text** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**num_prompt_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**token** | [**TokenOutput**](TokenOutput.md) | [**TokenOutput**](TokenOutput.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, 
decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionStreamV1Request.md b/docs/models/CompletionStreamV1Request.md new file mode 100644 index 00000000..15a58483 --- /dev/null +++ b/docs/models/CompletionStreamV1Request.md @@ -0,0 +1,31 @@ +# launch.api_client.model.completion_stream_v1_request.CompletionStreamV1Request + +Request object for a stream prompt completion task. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for a stream prompt completion task. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**max_new_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**stop_sequences** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**return_token_log_probs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionStreamV1Response.md b/docs/models/CompletionStreamV1Response.md new file mode 100644 index 00000000..4e784563 --- /dev/null +++ b/docs/models/CompletionStreamV1Response.md @@ -0,0 +1,19 @@ +# launch.api_client.model.completion_stream_v1_response.CompletionStreamV1Response + +Error of the response (if any). + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Error of the response (if any). 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**request_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**output** | [**CompletionStreamOutput**](CompletionStreamOutput.md) | [**CompletionStreamOutput**](CompletionStreamOutput.md) | | [optional] +**error** | [**StreamError**](StreamError.md) | [**StreamError**](StreamError.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionSyncV1Request.md b/docs/models/CompletionSyncV1Request.md new file mode 100644 index 00000000..540f650d --- /dev/null +++ b/docs/models/CompletionSyncV1Request.md @@ -0,0 +1,31 @@ +# launch.api_client.model.completion_sync_v1_request.CompletionSyncV1Request + +Request object for a synchronous prompt completion task. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for a synchronous prompt completion task. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**max_new_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**stop_sequences** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**return_token_log_probs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionSyncV1Response.md b/docs/models/CompletionSyncV1Response.md new file mode 100644 index 00000000..7d977144 --- /dev/null +++ b/docs/models/CompletionSyncV1Response.md @@ -0,0 +1,18 @@ +# launch.api_client.model.completion_sync_v1_response.CompletionSyncV1Response + +Response object for a synchronous prompt completion. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for a synchronous prompt completion. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**request_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**output** | [**CompletionOutput**](CompletionOutput.md) | [**CompletionOutput**](CompletionOutput.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionTokensDetails.md b/docs/models/CompletionTokensDetails.md new file mode 100644 index 00000000..631527f5 --- /dev/null +++ b/docs/models/CompletionTokensDetails.md @@ -0,0 +1,18 @@ +# launch.api_client.model.completion_tokens_details.CompletionTokensDetails + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**accepted_prediction_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion. | [optional] if omitted the server will use the default value of 0 +**audio_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Audio input tokens generated by the model. | [optional] if omitted the server will use the default value of 0 +**reasoning_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokens generated by the model for reasoning. | [optional] if omitted the server will use the default value of 0 +**rejected_prediction_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion. However, like reasoning tokens, these tokens are still counted in the total completion tokens for purposes of billing, output, and context window limits. 
| [optional] if omitted the server will use the default value of 0 +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionUsage.md b/docs/models/CompletionUsage.md new file mode 100644 index 00000000..7902b33d --- /dev/null +++ b/docs/models/CompletionUsage.md @@ -0,0 +1,19 @@ +# launch.api_client.model.completion_usage.CompletionUsage + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tokens in the generated completion. | +**prompt_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tokens in the prompt. 
| +**total_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Total number of tokens used in the request (prompt + completion). | +**completion_tokens_details** | [**CompletionTokensDetails**](CompletionTokensDetails.md) | [**CompletionTokensDetails**](CompletionTokensDetails.md) | Breakdown of tokens used in a completion. | [optional] +**prompt_tokens_details** | [**PromptTokensDetails**](PromptTokensDetails.md) | [**PromptTokensDetails**](PromptTokensDetails.md) | Breakdown of tokens used in the prompt. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionV2Request.md b/docs/models/CompletionV2Request.md new file mode 100644 index 00000000..1e29a3d8 --- /dev/null +++ b/docs/models/CompletionV2Request.md @@ -0,0 +1,52 @@ +# launch.api_client.model.completion_v2_request.CompletionV2Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | 
------------- | ------------- | ------------- | ------------- +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. | +**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | +**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. 
| [optional] if omitted the server will use the default value of 1 +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] +**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] +**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. | [optional] +**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. Used in beam search. | [optional] +**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. 
Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] +**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] +**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. | [optional] +**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. | [optional] +**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. 
| [optional] +**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true +**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true +**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true (the default), special tokens (e.g. BOS) will be added to the prompt. | [optional] +**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Similar to chat completion, this parameter specifies the format of output. 
Only {'type': 'json_object'} or {'type': 'text' } is supported. | [optional] +**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. | [optional] +**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. | [optional] +**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. | [optional] +**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] +**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. 
If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] +**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. | [optional] +**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Echo back the prompt in addition to the completion | [optional] if omitted the server will use the default value of false +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 +**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. | [optional] +**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. | [optional] +**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. | [optional] if omitted the server will use the default value of 16 +**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many completions to generate for each prompt. 
**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] if omitted the server will use the default value of 1 +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] +**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| [optional] +**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, partial message deltas will be sent. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). | [optional] if omitted the server will use the default value of false +**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] +**suffix** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The suffix that comes after a completion of inserted text. This parameter is only supported for `gpt-3.5-turbo-instruct`. | [optional] +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
| [optional] if omitted the server will use the default value of 1 +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] if omitted the server will use the default value of 1 +**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CompletionV2StreamErrorChunk.md b/docs/models/CompletionV2StreamErrorChunk.md new file mode 100644 index 00000000..bb469573 --- /dev/null +++ b/docs/models/CompletionV2StreamErrorChunk.md @@ -0,0 +1,15 @@ +# launch.api_client.model.completion_v2_stream_error_chunk.CompletionV2StreamErrorChunk + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**error** | [**StreamError**](StreamError.md) | [**StreamError**](StreamError.md) | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git 
a/docs/models/Content.md b/docs/models/Content.md new file mode 100644 index 00000000..789d858e --- /dev/null +++ b/docs/models/Content.md @@ -0,0 +1,11 @@ +# launch.api_client.model.content.Content + +An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Content1.md b/docs/models/Content1.md new file mode 100644 index 00000000..dfab0eef --- /dev/null +++ b/docs/models/Content1.md @@ -0,0 +1,11 @@ +# launch.api_client.model.content1.Content1 + +An array of content parts with a defined type. For developer messages, only type `text` is supported. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. For developer messages, only type `text` is supported. 
| + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Content2.md b/docs/models/Content2.md new file mode 100644 index 00000000..5d1c6ff3 --- /dev/null +++ b/docs/models/Content2.md @@ -0,0 +1,11 @@ +# launch.api_client.model.content2.Content2 + +An array of content parts with a defined type. For system messages, only type `text` is supported. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. For system messages, only type `text` is supported. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Content3.md b/docs/models/Content3.md new file mode 100644 index 00000000..6cb17b17 --- /dev/null +++ b/docs/models/Content3.md @@ -0,0 +1,11 @@ +# launch.api_client.model.content3.Content3 + +An array of content parts with a defined type. For tool messages, only type `text` is supported. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. For tool messages, only type `text` is supported. 
| + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Content4.md b/docs/models/Content4.md new file mode 100644 index 00000000..918af8f8 --- /dev/null +++ b/docs/models/Content4.md @@ -0,0 +1,11 @@ +# launch.api_client.model.content4.Content4 + +An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Content8.md b/docs/models/Content8.md new file mode 100644 index 00000000..9a82ed37 --- /dev/null +++ b/docs/models/Content8.md @@ -0,0 +1,11 @@ +# launch.api_client.model.content8.Content8 + +An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateAsyncTaskV1Response.md b/docs/models/CreateAsyncTaskV1Response.md new file mode 100644 index 00000000..ccd80047 --- /dev/null +++ b/docs/models/CreateAsyncTaskV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_async_task_v1_response.CreateAsyncTaskV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | 
frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchCompletionsV1ModelConfig.md b/docs/models/CreateBatchCompletionsV1ModelConfig.md new file mode 100644 index 00000000..0673637c --- /dev/null +++ b/docs/models/CreateBatchCompletionsV1ModelConfig.md @@ -0,0 +1,56 @@ +# launch.api_client.model.create_batch_completions_v1_model_config.CreateBatchCompletionsV1ModelConfig + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the model to use. 
| +**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length. If unspecified, will be automatically derived from the model config | [optional] +**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] +**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility | [optional] +**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging Face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false +**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. 
| [optional] +**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] +**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. | [optional] +**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. | [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] +**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] +**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] +**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. * \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. * \"bitsandbytes\" will load the weights using bitsandbytes quantization. 
| [optional] +**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] +**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer if available, 'slow' will always use the slow tokenizer, and 'mistral' will always use the tokenizer from `mistral_common`. | [optional] +**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. | [optional] +**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. 
| [optional] +**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for the model. | [optional] +**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] +**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. | [optional] +**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] +**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] +**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] +**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. | [optional] +**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] +**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. 
| [optional] +**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] +**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] +**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). | [optional] +**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] +**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. 
| [optional] +**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the checkpoint to load the model from. | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Suggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config. System may decide to use a different number than the given value. | [optional] if omitted the server will use the default value of 1 +**max_context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len. | [optional] +**response_role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Role of the response in the conversation. Only supported in chat completions. 
| [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Labels to attach to the batch inference job. | [optional] if omitted the server will use the default value of {} +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchCompletionsV1Request.md b/docs/models/CreateBatchCompletionsV1Request.md new file mode 100644 index 00000000..b544eac0 --- /dev/null +++ b/docs/models/CreateBatchCompletionsV1Request.md @@ -0,0 +1,31 @@ +# launch.api_client.model.create_batch_completions_v1_request.CreateBatchCompletionsV1Request + +Request object for batch completions. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for batch completions. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_config** | [**CreateBatchCompletionsV1ModelConfig**](CreateBatchCompletionsV1ModelConfig.md) | [**CreateBatchCompletionsV1ModelConfig**](CreateBatchCompletionsV1ModelConfig.md) | | +**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | +**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Labels to attach to the batch inference job. | [optional] if omitted the server will use the default value of {} +**data_parallelism** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference. 
| [optional] if omitted the server will use the default value of 1 +**max_runtime_sec** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum runtime of the batch inference in seconds. Default to one day. | [optional] if omitted the server will use the default value of 86400 +**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] +**tool_config** | [**ToolConfig**](ToolConfig.md) | [**ToolConfig**](ToolConfig.md) | Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | CPUs to use for the batch inference. | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPUs to use for the batch inference. | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Amount of memory to use for the batch inference. 
| [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | GPU type to use for the batch inference. | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Storage to use for the batch inference. | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of nodes per worker for the batch inference. | [optional] +**content** | [**CreateBatchCompletionsV1RequestContent**](CreateBatchCompletionsV1RequestContent.md) | [**CreateBatchCompletionsV1RequestContent**](CreateBatchCompletionsV1RequestContent.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchCompletionsV1RequestContent.md b/docs/models/CreateBatchCompletionsV1RequestContent.md new file mode 100644 index 00000000..92b4fd41 --- /dev/null +++ b/docs/models/CreateBatchCompletionsV1RequestContent.md @@ -0,0 +1,24 @@ +# launch.api_client.model.create_batch_completions_v1_request_content.CreateBatchCompletionsV1RequestContent + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | 
------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**max_new_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**prompts** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**stop_sequences** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**return_token_log_probs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchCompletionsV1Response.md b/docs/models/CreateBatchCompletionsV1Response.md new file mode 100644 index 00000000..78970ff2 --- /dev/null +++ 
b/docs/models/CreateBatchCompletionsV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_batch_completions_v1_response.CreateBatchCompletionsV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchCompletionsV2Request.md b/docs/models/CreateBatchCompletionsV2Request.md new file mode 100644 index 00000000..5209757f --- /dev/null +++ b/docs/models/CreateBatchCompletionsV2Request.md @@ -0,0 +1,31 @@ +# launch.api_client.model.create_batch_completions_v2_request.CreateBatchCompletionsV2Request + +Request object for batch completions. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for batch completions. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model configuration for the batch inference. Hardware configurations are inferred. | +**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. The output file will be a JSON file of type List[CompletionOutput]. | +**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Labels to attach to the batch inference job. 
| [optional] if omitted the server will use the default value of {} +**data_parallelism** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference. | [optional] if omitted the server will use the default value of 1 +**max_runtime_sec** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum runtime of the batch inference in seconds. Default to one day. | [optional] if omitted the server will use the default value of 86400 +**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] +**tool_config** | [**ToolConfig**](ToolConfig.md) | [**ToolConfig**](ToolConfig.md) | Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | CPUs to use for the batch inference. 
| [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPUs to use for the batch inference. | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Amount of memory to use for the batch inference. | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | GPU type to use for the batch inference. | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Storage to use for the batch inference. | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of nodes per worker for the batch inference. | [optional] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Either `input_data_path` or `content` needs to be provided. When input_data_path is provided, the input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchJobResourceRequests.md b/docs/models/CreateBatchJobResourceRequests.md new file mode 100644 index 00000000..766c1630 --- /dev/null +++ b/docs/models/CreateBatchJobResourceRequests.md @@ -0,0 +1,22 @@ +# launch.api_client.model.create_batch_job_resource_requests.CreateBatchJobResourceRequests + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchJobV1Request.md 
b/docs/models/CreateBatchJobV1Request.md new file mode 100644 index 00000000..2874eaf8 --- /dev/null +++ b/docs/models/CreateBatchJobV1Request.md @@ -0,0 +1,20 @@ +# launch.api_client.model.create_batch_job_v1_request.CreateBatchJobV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**resource_requests** | [**CreateBatchJobResourceRequests**](CreateBatchJobResourceRequests.md) | [**CreateBatchJobResourceRequests**](CreateBatchJobResourceRequests.md) | | +**serialization_format** | [**BatchJobSerializationFormat**](BatchJobSerializationFormat.md) | [**BatchJobSerializationFormat**](BatchJobSerializationFormat.md) | | +**input_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**timeout_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 43200.0 +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateBatchJobV1Response.md b/docs/models/CreateBatchJobV1Response.md new file mode 100644 index 00000000..2591a24f --- /dev/null +++ b/docs/models/CreateBatchJobV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_batch_job_v1_response.CreateBatchJobV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateChatCompletionResponse.md b/docs/models/CreateChatCompletionResponse.md new file mode 100644 index 00000000..5011124e --- /dev/null +++ b/docs/models/CreateChatCompletionResponse.md @@ -0,0 +1,22 @@ +# launch.api_client.model.create_chat_completion_response.CreateChatCompletionResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**created** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) of when the chat completion was created. | +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model used for the chat completion. 
| +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier for the chat completion. | +**choices** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of chat completion choices. Can be more than one if `n` is greater than 1. | +**object** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The object type, which is always `chat.completion`. | must be one of ["chat.completion", ] +**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] +**system_fingerprint** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
| [optional] +**usage** | [**CompletionUsage**](CompletionUsage.md) | [**CompletionUsage**](CompletionUsage.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateChatCompletionStreamResponse.md b/docs/models/CreateChatCompletionStreamResponse.md new file mode 100644 index 00000000..ff4b84df --- /dev/null +++ b/docs/models/CreateChatCompletionStreamResponse.md @@ -0,0 +1,22 @@ +# launch.api_client.model.create_chat_completion_stream_response.CreateChatCompletionStreamResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**created** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. 
| +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model to generate the completion. | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier for the chat completion. Each chunk has the same ID. | +**choices** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {\"include_usage\": true}`. | +**object** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The object type, which is always `chat.completion.chunk`. | must be one of ["chat.completion.chunk", ] +**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] +**system_fingerprint** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This fingerprint represents the backend configuration that the model runs with. 
Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. | [optional] +**usage** | [**CompletionUsage**](CompletionUsage.md) | [**CompletionUsage**](CompletionUsage.md) | An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request. When present, it contains a null value **except for the last chunk** which contains the token usage statistics for the entire request. **NOTE:** If the stream is interrupted or cancelled, you may not receive the final usage chunk which contains the total token usage for the request. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateCompletionResponse.md b/docs/models/CreateCompletionResponse.md new file mode 100644 index 00000000..9f64ce59 --- /dev/null +++ b/docs/models/CreateCompletionResponse.md @@ -0,0 +1,21 @@ +# launch.api_client.model.create_completion_response.CreateCompletionResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | 
------------- | ------------- +**created** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Unix timestamp (in seconds) of when the completion was created. | +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model used for completion. | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier for the completion. | +**choices** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The list of completion choices the model generated for the input prompt. | +**object** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The object type, which is always \"text_completion\" | must be one of ["text_completion", ] +**system_fingerprint** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This fingerprint represents the backend configuration that the model runs with. 
Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. | [optional] +**usage** | [**CompletionUsage**](CompletionUsage.md) | [**CompletionUsage**](CompletionUsage.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateDeepSpeedModelEndpointRequest.md b/docs/models/CreateDeepSpeedModelEndpointRequest.md new file mode 100644 index 00000000..7e5db065 --- /dev/null +++ b/docs/models/CreateDeepSpeedModelEndpointRequest.md @@ -0,0 +1,44 @@ +# launch.api_client.model.create_deep_speed_model_endpoint_request.CreateDeepSpeedModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_name** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["deepspeed", ] if omitted the server will use the default value of deepspeed +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateDockerImageBatchJobBundleV1Request.md b/docs/models/CreateDockerImageBatchJobBundleV1Request.md new file mode 100644 index 00000000..bfb65c5f --- /dev/null +++ b/docs/models/CreateDockerImageBatchJobBundleV1Request.md @@ -0,0 +1,22 @@ +# launch.api_client.model.create_docker_image_batch_job_bundle_v1_request.CreateDockerImageBatchJobBundleV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**image_repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | 
| +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of {} +**mount_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**resource_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of {} +**public** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if 
omitted the server will use the default value of false +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateDockerImageBatchJobBundleV1Response.md b/docs/models/CreateDockerImageBatchJobBundleV1Response.md new file mode 100644 index 00000000..ea485921 --- /dev/null +++ b/docs/models/CreateDockerImageBatchJobBundleV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_docker_image_batch_job_bundle_v1_response.CreateDockerImageBatchJobBundleV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**docker_image_batch_job_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, 
NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateDockerImageBatchJobResourceRequests.md b/docs/models/CreateDockerImageBatchJobResourceRequests.md new file mode 100644 index 00000000..f38701b3 --- /dev/null +++ b/docs/models/CreateDockerImageBatchJobResourceRequests.md @@ -0,0 +1,20 @@ +# launch.api_client.model.create_docker_image_batch_job_resource_requests.CreateDockerImageBatchJobResourceRequests + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, 
bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateDockerImageBatchJobV1Request.md b/docs/models/CreateDockerImageBatchJobV1Request.md new file mode 100644 index 00000000..69232d1c --- /dev/null +++ b/docs/models/CreateDockerImageBatchJobV1Request.md @@ -0,0 +1,20 @@ +# launch.api_client.model.create_docker_image_batch_job_v1_request.CreateDockerImageBatchJobV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes 
+------------ | ------------- | ------------- | ------------- | ------------- +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**docker_image_batch_job_bundle_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**docker_image_batch_job_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**job_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**resource_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of {} +**override_job_max_runtime_s** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader 
| frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateDockerImageBatchJobV1Response.md b/docs/models/CreateDockerImageBatchJobV1Response.md new file mode 100644 index 00000000..c5bcef34 --- /dev/null +++ b/docs/models/CreateDockerImageBatchJobV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_docker_image_batch_job_v1_response.CreateDockerImageBatchJobV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) + diff --git a/docs/models/CreateFineTuneRequest.md b/docs/models/CreateFineTuneRequest.md new file mode 100644 index 00000000..5e0a72b3 --- /dev/null +++ b/docs/models/CreateFineTuneRequest.md @@ -0,0 +1,20 @@ +# launch.api_client.model.create_fine_tune_request.CreateFineTuneRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**training_file** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**hyperparameters** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**validation_file** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**suffix** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**wandb_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateFineTuneResponse.md b/docs/models/CreateFineTuneResponse.md new file mode 100644 index 00000000..b1723a07 --- /dev/null +++ b/docs/models/CreateFineTuneResponse.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_fine_tune_response.CreateFineTuneResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateLLMModelEndpointV1Request.md b/docs/models/CreateLLMModelEndpointV1Request.md new file mode 100644 index 00000000..9b73d5c3 --- /dev/null +++ b/docs/models/CreateLLMModelEndpointV1Request.md @@ -0,0 +1,9 @@ +# launch.api_client.model.create_llm_model_endpoint_v1_request.CreateLLMModelEndpointV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateLLMModelEndpointV1Response.md b/docs/models/CreateLLMModelEndpointV1Response.md new file mode 100644 index 00000000..521eecce --- /dev/null +++ b/docs/models/CreateLLMModelEndpointV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_llm_model_endpoint_v1_response.CreateLLMModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateLightLLMModelEndpointRequest.md b/docs/models/CreateLightLLMModelEndpointRequest.md new file mode 100644 index 00000000..501943ab --- /dev/null +++ b/docs/models/CreateLightLLMModelEndpointRequest.md @@ -0,0 +1,44 @@ +# launch.api_client.model.create_light_llm_model_endpoint_request.CreateLightLLMModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- 
| ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | 
dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] 
+**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the 
default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["lightllm"] if omitted the server will use the default value of lightllm +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateModelBundleV1Request.md b/docs/models/CreateModelBundleV1Request.md new file mode 100644 index 00000000..a8bc8ff2 --- /dev/null +++ b/docs/models/CreateModelBundleV1Request.md @@ -0,0 +1,24 @@ +# launch.api_client.model.create_model_bundle_v1_request.CreateModelBundleV1Request + +Request object for creating a Model Bundle. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for creating a Model Bundle. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**packaging_type** | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**env_params** | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | | +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateModelBundleV1Response.md b/docs/models/CreateModelBundleV1Response.md new file mode 100644 index 00000000..e23c7ee6 --- /dev/null +++ b/docs/models/CreateModelBundleV1Response.md @@ -0,0 +1,17 @@ +# launch.api_client.model.create_model_bundle_v1_response.CreateModelBundleV1Response + +Response object for creating a Model Bundle. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for creating a Model Bundle. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateModelBundleV2Request.md b/docs/models/CreateModelBundleV2Request.md new file mode 100644 index 00000000..e504787d --- /dev/null +++ b/docs/models/CreateModelBundleV2Request.md @@ -0,0 +1,20 @@ +# launch.api_client.model.create_model_bundle_v2_request.CreateModelBundleV2Request + +Request object for creating a Model Bundle. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Request object for creating a Model Bundle. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateModelBundleV2Response.md b/docs/models/CreateModelBundleV2Response.md new file mode 100644 index 00000000..dd6408a5 --- /dev/null +++ b/docs/models/CreateModelBundleV2Response.md @@ -0,0 +1,17 @@ +# 
launch.api_client.model.create_model_bundle_v2_response.CreateModelBundleV2Response + +Response object for creating a Model Bundle. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for creating a Model Bundle. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateModelEndpointV1Request.md b/docs/models/CreateModelEndpointV1Request.md new file mode 100644 index 00000000..7c299b20 --- /dev/null +++ b/docs/models/CreateModelEndpointV1Request.md @@ -0,0 +1,37 @@ +# launch.api_client.model.create_model_endpoint_v1_request.CreateModelEndpointV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**endpoint_type** | [**ModelEndpointType**](ModelEndpointType.md) | [**ModelEndpointType**](ModelEndpointType.md) | | +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, 
FileIO | | [optional] if omitted the server will use the default value of 1 +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateModelEndpointV1Response.md b/docs/models/CreateModelEndpointV1Response.md new file mode 100644 index 00000000..ca6bdde5 --- /dev/null +++ b/docs/models/CreateModelEndpointV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.create_model_endpoint_v1_response.CreateModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateSGLangModelEndpointRequest.md b/docs/models/CreateSGLangModelEndpointRequest.md new file mode 100644 index 00000000..8db17a8d --- /dev/null +++ b/docs/models/CreateSGLangModelEndpointRequest.md @@ -0,0 +1,132 @@ +# launch.api_client.model.create_sg_lang_model_endpoint_request.CreateSGLangModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | 
dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** 
| dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync +**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from the Hugging Face Hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Defaults to False. | [optional] if omitted the server will use the default value of false +**tp_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tensor parallel size. | [optional] +**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, skip init tokenizer and pass input_ids in generate request | [optional] +**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. | [optional] +**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. 
| [optional] +**kv_cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. "auto" will use model data type. | [optional] +**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the JSON file containing the KV cache scaling factors. | [optional] +**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The quantization method. | [optional] +**context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model's maximum context length. | [optional] +**device** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The device type. | [optional] +**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Override the model name returned by the v1/models endpoint in OpenAI API server. 
| [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The builtin chat template name or path of the chat template file. | [optional] +**is_embedding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use a CausalLM as an embedding model. | [optional] +**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. | [optional] +**mem_fraction_static** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The fraction of the memory used for static allocation. | [optional] +**max_running_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of running requests. | [optional] +**max_total_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in the memory pool. 
| [optional] +**chunked_prefill_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a chunk for the chunked prefill. | [optional] +**max_prefill_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a prefill batch. | [optional] +**schedule_policy** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The scheduling policy of the requests. | [optional] +**schedule_conservativeness** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How conservative the schedule policy is. 
| [optional] +**cpu_offload_gb** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many GBs of RAM to reserve for CPU offloading | [optional] +**prefill_only_one_req** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, we only prefill one request at one prefill batch | [optional] +**stream_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The interval for streaming in terms of the token length. | [optional] +**random_seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The random seed. | [optional] +**constrained_json_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex pattern for syntactic whitespaces allowed in JSON constrained output. | [optional] +**watchdog_timeout** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set watchdog timeout in seconds. 
| [optional] +**download_dir** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model download directory. | [optional] +**base_gpu_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The base GPU ID to start allocating GPUs from. | [optional] +**log_level** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of all loggers. | [optional] +**log_level_http** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of HTTP server. | [optional] +**log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Log the inputs and outputs of all requests. | [optional] +**show_time_cost** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Show time cost of custom marks. 
| [optional] +**enable_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable log prometheus metrics. | [optional] +**decode_log_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log interval of decode batch. | [optional] +**api_key** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set API key of the server. | [optional] +**file_storage_pth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the file storage in backend. | [optional] +**enable_cache_report** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Return number of cached tokens in usage.prompt_tokens_details. | [optional] +**data_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The data parallelism size. 
| [optional] +**load_balance_method** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The load balancing strategy for data parallelism. | [optional] +**expert_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The expert parallelism size. | [optional] +**dist_init_addr** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The host address for initializing distributed backend. | [optional] +**nnodes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of nodes. | [optional] +**node_rank** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The node rank. | [optional] +**json_model_override_args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A dictionary in JSON string format used to override default model configurations. 
| [optional] +**lora_paths** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The list of LoRA adapters. | [optional] +**max_loras_per_batch** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of adapters for a running batch. | [optional] +**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for attention layers. | [optional] +**sampling_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for sampling layers. | [optional] +**grammar_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the backend for grammar-guided decoding. | [optional] +**speculative_algorithm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Speculative algorithm. 
| [optional] +**speculative_draft_model_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the draft model weights. | [optional] +**speculative_num_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of steps sampled from the draft model in Speculative Decoding. | [optional] +**speculative_num_draft_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of tokens sampled from the draft model in Speculative Decoding. | [optional] +**speculative_eagle_topk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of tokens sampled from the draft model in eagle2 each step. 
| [optional] +**enable_double_sparsity** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable double sparsity attention | [optional] +**ds_channel_config_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the double sparsity channel config | [optional] +**ds_heavy_channel_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy channels in double sparsity attention | [optional] +**ds_heavy_token_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy tokens in double sparsity attention | [optional] +**ds_heavy_channel_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of heavy channels in double sparsity attention | [optional] +**ds_sparse_decode_threshold** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The threshold for sparse 
decoding in double sparsity attention | [optional] +**disable_radix_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable RadixAttention for prefix caching. | [optional] +**disable_jump_forward** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable jump-forward for grammar-guided decoding. | [optional] +**disable_cuda_graph** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph. | [optional] +**disable_cuda_graph_padding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph when padding is needed. | [optional] +**disable_outlines_disk_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable disk cache of outlines. | [optional] +**disable_custom_all_reduce** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the custom all-reduce kernel. 
| [optional] +**disable_mla** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable Multi-head Latent Attention (MLA) for DeepSeek-V2. | [optional] +**disable_overlap_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the overlap scheduler. | [optional] +**enable_mixed_chunk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable mixing prefill and decode in a batch when using chunked prefill. | [optional] +**enable_dp_attention** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable data parallelism for attention and tensor parallelism for FFN. | [optional] +**enable_ep_moe** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable expert parallelism for moe. 
| [optional] +**enable_torch_compile** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torch.compile. | [optional] +**torch_compile_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size when using torch compile. | [optional] +**cuda_graph_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size for cuda graph. | [optional] +**cuda_graph_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the list of batch sizes for cuda graph. | [optional] +**torchao_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torchao. | [optional] +**enable_nan_detection** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable the NaN detection for debugging purposes. 
| [optional] +**enable_p2p_check** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable P2P check for GPU access. | [optional] +**triton_attention_reduce_in_fp32** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Cast the intermediate attention results to fp32. | [optional] +**triton_attention_num_kv_splits** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of KV splits in flash decoding Triton kernel. | [optional] +**num_continuous_decode_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Run multiple continuous decoding steps to reduce scheduling overhead. | [optional] +**delete_ckpt_after_loading** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Delete the model checkpoint after loading the model. 
| [optional] +**enable_memory_saver** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow saving memory using release_memory_occupation and resume_memory_occupation | [optional] +**allow_auto_truncate** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow automatically truncating requests that exceed the maximum input length. | [optional] +**enable_custom_logit_processor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable users to pass custom logit processors to the server. | [optional] +**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specify the parser for handling tool-call interactions. | [optional] +**huggingface_repo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Hugging Face repository ID. 
| [optional] +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["sglang", ] if omitted the server will use the default value of sglang +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateTensorRTLLMModelEndpointRequest.md b/docs/models/CreateTensorRTLLMModelEndpointRequest.md new file mode 100644 index 00000000..be014059 --- /dev/null +++ b/docs/models/CreateTensorRTLLMModelEndpointRequest.md @@ -0,0 +1,44 @@ +# launch.api_client.model.create_tensor_rtllm_model_endpoint_request.CreateTensorRTLLMModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, 
bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the 
default value of sync +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["tensorrt_llm", ] if omitted the server will use the default value of tensorrt_llm +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md b/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md new file mode 100644 index 00000000..ec6191c6 --- /dev/null +++ b/docs/models/CreateTextGenerationInferenceModelEndpointRequest.md @@ -0,0 +1,44 @@ +# launch.api_client.model.create_text_generation_inference_model_endpoint_request.CreateTextGenerationInferenceModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, 
list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, 
| frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["text_generation_inference", ] if omitted the server will use the default value of text_generation_inference +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateTriggerV1Request.md b/docs/models/CreateTriggerV1Request.md new file mode 100644 index 00000000..08830f2d --- /dev/null +++ b/docs/models/CreateTriggerV1Request.md @@ -0,0 +1,19 @@ +# launch.api_client.model.create_trigger_v1_request.CreateTriggerV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**cron_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**default_job_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_job_metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateTriggerV1Response.md b/docs/models/CreateTriggerV1Response.md new file mode 100644 index 00000000..b714267e --- /dev/null +++ b/docs/models/CreateTriggerV1Response.md @@ -0,0 +1,15 @@ +# 
launch.api_client.model.create_trigger_v1_response.CreateTriggerV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**trigger_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CreateVLLMModelEndpointRequest.md b/docs/models/CreateVLLMModelEndpointRequest.md new file mode 100644 index 00000000..e4b09196 --- /dev/null +++ b/docs/models/CreateVLLMModelEndpointRequest.md @@ -0,0 +1,82 @@ +# launch.api_client.model.create_vllm_model_endpoint_request.CreateVLLMModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. 
| [optional] if omitted the server will use the default value of false +**source** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of hugging_face +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of latest +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**endpoint_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of sync +**max_gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum GPU memory utilization for the batch inference. Default to 90%. 
Deprecated in favor of specifying this in VLLMModelConfig | [optional] +**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Attention backend to use for vLLM. Default to None. | [optional] +**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length. If unspecified, will be automatically derived from the model config | [optional] +**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] +**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility | [optional] +**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. 
| [optional] if omitted the server will use the default value of false +**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. | [optional] +**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] +**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. | [optional] +**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. | [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] +**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] +**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] +**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. * \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. * \"bitsandbytes\" will load the weights using bitsandbytes quantization. 
| [optional] +**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] +**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer if available, 'slow' will always use the slow tokenizer, and 'mistral' will always use the tokenizer from `mistral_common`. | [optional] +**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. | [optional] +**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of batched tokens per iteration | [optional] +**tokenizer** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. 
| [optional] +**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for reproducibility. | [optional] +**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] +**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. | [optional] +**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] +**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. 
If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] +**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] +**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. | [optional] +**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] +**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. 
| [optional] +**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] +**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] +**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). | [optional] +**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] +**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. 
| [optional] +**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["vllm", ] if omitted the server will use the default value of vllm +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/CustomFramework.md b/docs/models/CustomFramework.md new file mode 100644 index 00000000..d09b7660 --- /dev/null +++ b/docs/models/CustomFramework.md @@ -0,0 +1,19 @@ +# launch.api_client.model.custom_framework.CustomFramework + +This is the entity-layer class for a custom framework specification. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a custom framework specification. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**image_repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**framework_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["custom_base_image", ] +**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/DeleteFileResponse.md b/docs/models/DeleteFileResponse.md new file mode 100644 index 00000000..198545e8 --- /dev/null +++ b/docs/models/DeleteFileResponse.md @@ -0,0 +1,17 @@ +# launch.api_client.model.delete_file_response.DeleteFileResponse + +Response object for deleting a file. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for deleting a file. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**deleted** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether deletion was successful. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/DeleteLLMEndpointResponse.md b/docs/models/DeleteLLMEndpointResponse.md new file mode 100644 index 00000000..5306832d --- /dev/null +++ b/docs/models/DeleteLLMEndpointResponse.md @@ -0,0 +1,15 @@ +# launch.api_client.model.delete_llm_endpoint_response.DeleteLLMEndpointResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**deleted** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/DeleteModelEndpointV1Response.md b/docs/models/DeleteModelEndpointV1Response.md new file mode 100644 index 00000000..062057f3 --- /dev/null +++ b/docs/models/DeleteModelEndpointV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.delete_model_endpoint_v1_response.DeleteModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**deleted** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/DeleteTriggerV1Response.md b/docs/models/DeleteTriggerV1Response.md new file mode 100644 index 00000000..7ec831cc --- /dev/null +++ b/docs/models/DeleteTriggerV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.delete_trigger_v1_response.DeleteTriggerV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name 
can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/DockerImageBatchJob.md b/docs/models/DockerImageBatchJob.md new file mode 100644 index 00000000..57327ff1 --- /dev/null +++ b/docs/models/DockerImageBatchJob.md @@ -0,0 +1,25 @@ +# launch.api_client.model.docker_image_batch_job.DockerImageBatchJob + +This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job created via the \"supply a docker image for a k8s job\" API. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job created via the \"supply a docker image for a k8s job\" API. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**owner** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_by** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**status** | [**BatchJobStatus**](BatchJobStatus.md) | [**BatchJobStatus**](BatchJobStatus.md) | | +**completed_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] value must conform to RFC-3339 date-time +**annotations** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**override_job_max_runtime_s** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 1 +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/DockerImageBatchJobBundleV1Response.md b/docs/models/DockerImageBatchJobBundleV1Response.md new file mode 100644 index 00000000..e50f5e43 --- /dev/null +++ b/docs/models/DockerImageBatchJobBundleV1Response.md @@ -0,0 +1,28 @@ +# launch.api_client.model.docker_image_batch_job_bundle_v1_response.DockerImageBatchJobBundleV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**image_repository** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**mount_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, 
FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/EndpointPredictV1Request.md b/docs/models/EndpointPredictV1Request.md new file mode 100644 index 00000000..ba60dd92 --- /dev/null +++ b/docs/models/EndpointPredictV1Request.md @@ -0,0 +1,21 @@ +# launch.api_client.model.endpoint_predict_v1_request.EndpointPredictV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cloudpickle** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**return_pickled** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**destination_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/File.md b/docs/models/File.md new file mode 100644 index 00000000..b0b00491 --- /dev/null +++ b/docs/models/File.md @@ -0,0 +1,17 @@ +# launch.api_client.model.file.File + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**filename** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the file, used when passing the file to the model as a string. | [optional] +**file_data** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The base64 encoded file data, used when passing the file to the model as a string. | [optional] +**file_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The ID of an uploaded file to use as input. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/FilteredChatCompletionV2Request.md b/docs/models/FilteredChatCompletionV2Request.md new file mode 100644 index 00000000..787824c2 --- /dev/null +++ b/docs/models/FilteredChatCompletionV2Request.md @@ -0,0 +1,72 @@ +# launch.api_client.model.filtered_chat_completion_v2_request.FilteredChatCompletionV2Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**messages** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of messages comprising the conversation so far. Depending on the [model](/docs/models) you use, different message types (modalities) are supported, like [text](/docs/guides/text-generation), [images](/docs/guides/vision), and [audio](/docs/guides/audio). 
| +**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of output sequences that are generated from the prompt. From these `best_of` sequences, the top `n` sequences are returned. `best_of` must be greater than or equal to `n`. This is treated as the beam width when `use_beam_search` is True. By default, `best_of` is set to `n`. | [optional] +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] +**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] +**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. | [optional] +**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. 
Used in beam search. | [optional] +**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] +**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] +**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. | [optional] +**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. 
Defaults to False. | [optional] +**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. | [optional] +**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true +**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true +**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the new message will be prepended with the last message if they belong to the same role. 
| [optional] +**add_generation_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model. | [optional] +**continue_final_message** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`. | [optional] +**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, special tokens (e.g. BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default). | [optional] +**documents** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). 
If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys. | [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given | [optional] +**chat_template_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Additional kwargs to pass to the template renderer. Will be accessible by the chat template. | [optional] +**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. | [optional] +**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. 
| [optional] +**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. | [optional] +**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] +**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] +**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. | [optional] +**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling. 
| [optional] +**metadata** | [**Metadata**](Metadata.md) | [**Metadata**](Metadata.md) | | [optional] +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] if omitted the server will use the default value of 1 +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. | [optional] if omitted the server will use the default value of 1 +**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). 
| [optional] +**service_tier** | [**ServiceTier**](ServiceTier.md) | [**ServiceTier**](ServiceTier.md) | | [optional] +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**modalities** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | [optional] +**reasoning_effort** | [**ReasoningEffort**](ReasoningEffort.md) | [**ReasoningEffort**](ReasoningEffort.md) | | [optional] +**max_completion_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning). | [optional] +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. | [optional] if omitted the server will use the default value of 0 +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. | [optional] if omitted the server will use the default value of 0 +**web_search_options** | [**WebSearchOptions**](WebSearchOptions.md) | [**WebSearchOptions**](WebSearchOptions.md) | This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat). | [optional] +**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. | [optional] +**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An object specifying the format that the model must output. Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. | [optional] +**audio** | [**Audio2**](Audio2.md) | [**Audio2**](Audio2.md) | Parameters for audio output. Required when audio output is requested with `modalities: [\"audio\"]`. [Learn more](/docs/guides/audio). | [optional] +**store** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether or not to store the output of this chat completion request for use in our [model distillation](/docs/guides/distillation) or [evals](/docs/guides/evals) products. | [optional] if omitted the server will use the default value of false +**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| [optional] +**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. | [optional] +**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. | [optional] if omitted the server will use the default value of false +**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. This value can be used to control [costs](https://openai.com/api/pricing/) for text generated via API. This value is now deprecated in favor of `max_completion_tokens`, and is not compatible with [o-series models](/docs/guides/reasoning). 
| [optional] +**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. | [optional] if omitted the server will use the default value of 1 +**prediction** | [**PredictionContent**](PredictionContent.md) | [**PredictionContent**](PredictionContent.md) | Configuration for a [Predicted Output](/docs/guides/predicted-outputs), which can greatly improve response times when large parts of the model response are known ahead of time. This is most common when you are regenerating a file with only minor changes to most of the content. | [optional] +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. 
| [optional] +**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] +**tools** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. | [optional] +**tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls which (if any) tool is called by the model. `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. `none` is the default when no tools are present. `auto` is the default if tools are present. | [optional] +**parallel_tool_calls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. 
| [optional] +**function_call** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. | [optional] +**functions** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/FilteredCompletionV2Request.md b/docs/models/FilteredCompletionV2Request.md new file mode 100644 index 00000000..c41361d8 --- /dev/null +++ b/docs/models/FilteredCompletionV2Request.md @@ -0,0 +1,52 @@ +# launch.api_client.model.filtered_completion_v2_request.FilteredCompletionV2Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 
| +**best_of** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] if omitted the server will use the default value of 1 +**top_k** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the number of top tokens to consider. -1 means consider all tokens. | [optional] +**min_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that represents the minimum probability for a token to be considered, relative to the probability of the most likely token. Must be in [0, 1]. Set to 0 to disable this. | [optional] +**use_beam_search** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use beam search for sampling. 
| [optional] +**length_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes sequences based on their length. Used in beam search. | [optional] +**repetition_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Float that penalizes new tokens based on whether they appear in the prompt and the generated text so far. Values > 1 encourage the model to use new tokens, while values < 1 encourage the model to repeat tokens. | [optional] +**early_stopping** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Controls the stopping condition for beam search. It accepts the following values: `True`, where the generation stops as soon as there are `best_of` complete candidates; `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better candidates; `\"never\"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm). | [optional] +**stop_token_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of tokens that stop the generation when they are generated. The returned output will contain the stop tokens unless the stop tokens are special tokens. 
| [optional] +**include_stop_str_in_output** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to include the stop strings in output text. | [optional] +**ignore_eos** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to ignore the EOS token and continue generating tokens after the EOS token is generated. | [optional] +**min_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Minimum number of tokens to generate per output sequence before EOS or stop_token_ids can be generated | [optional] +**skip_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to skip special tokens in the output. Only supported in vllm. | [optional] if omitted the server will use the default value of true +**spaces_between_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to add spaces between special tokens in the output. Only supported in vllm. 
| [optional] if omitted the server will use the default value of true +**add_special_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true (the default), special tokens (e.g. BOS) will be added to the prompt. | [optional] +**response_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'} or {'type': 'text' } is supported. | [optional] +**guided_json** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | JSON schema for guided decoding. Only supported in vllm. | [optional] +**guided_regex** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex for guided decoding. Only supported in vllm. | [optional] +**guided_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choices for guided decoding. Only supported in vllm. 
| [optional] +**guided_grammar** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Context-free grammar for guided decoding. Only supported in vllm. | [optional] +**guided_decoding_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer' | [optional] +**guided_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, will override the default whitespace pattern for guided json decoding. 
| [optional] +**model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**echo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Echo back the prompt in addition to the completion | [optional] if omitted the server will use the default value of false +**frequency_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 +**logit_bias** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. | [optional] +**logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. | [optional] +**max_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. | [optional] if omitted the server will use the default value of 16 +**n** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. | [optional] if omitted the server will use the default value of 1 +**presence_penalty** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation) | [optional] if omitted the server will use the default value of 0 +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. | [optional] +**stop** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| [optional] +**stream** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**stream_options** | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | [**ChatCompletionStreamOptions**](ChatCompletionStreamOptions.md) | | [optional] +**suffix** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The suffix that comes after a completion of inserted text. This parameter is only supported for `gpt-3.5-turbo-instruct`. | [optional] +**temperature** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. | [optional] if omitted the server will use the default value of 1 +**top_p** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. 
We generally recommend altering this or `temperature` but not both. | [optional] if omitted the server will use the default value of 1 +**user** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids). | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Function1.md b/docs/models/Function1.md new file mode 100644 index 00000000..b8abe0fb --- /dev/null +++ b/docs/models/Function1.md @@ -0,0 +1,16 @@ +# launch.api_client.model.function1.Function1 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, 
| frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | +**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Function2.md b/docs/models/Function2.md new file mode 100644 index 00000000..7cc1484e --- /dev/null +++ b/docs/models/Function2.md @@ -0,0 +1,16 @@ +# launch.api_client.model.function2.Function2 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | [optional] +**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Function3.md b/docs/models/Function3.md new file mode 100644 index 00000000..f91e8561 --- /dev/null +++ b/docs/models/Function3.md @@ -0,0 +1,15 @@ +# launch.api_client.model.function3.Function3 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | 
------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/FunctionCall.md b/docs/models/FunctionCall.md new file mode 100644 index 00000000..c440d9a3 --- /dev/null +++ b/docs/models/FunctionCall.md @@ -0,0 +1,16 @@ +# launch.api_client.model.function_call.FunctionCall + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. 
| +**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/FunctionCall2.md b/docs/models/FunctionCall2.md new file mode 100644 index 00000000..5f3a8af3 --- /dev/null +++ b/docs/models/FunctionCall2.md @@ -0,0 +1,16 @@ +# launch.api_client.model.function_call2.FunctionCall2 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**arguments** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | [optional] +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to call. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/FunctionObject.md b/docs/models/FunctionObject.md new file mode 100644 index 00000000..f5316302 --- /dev/null +++ b/docs/models/FunctionObject.md @@ -0,0 +1,18 @@ +# launch.api_client.model.function_object.FunctionObject + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | +**description** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A description of what the function does, used by the model to choose when and how to call the function. | [optional] +**parameters** | [**FunctionParameters**](FunctionParameters.md) | [**FunctionParameters**](FunctionParameters.md) | | [optional] +**strict** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling). 
| [optional] if omitted the server will use the default value of false +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/FunctionParameters.md b/docs/models/FunctionParameters.md new file mode 100644 index 00000000..edc0955e --- /dev/null +++ b/docs/models/FunctionParameters.md @@ -0,0 +1,14 @@ +# launch.api_client.model.function_parameters.FunctionParameters + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetAsyncTaskV1Response.md b/docs/models/GetAsyncTaskV1Response.md 
new file mode 100644 index 00000000..062b0cca --- /dev/null +++ b/docs/models/GetAsyncTaskV1Response.md @@ -0,0 +1,19 @@ +# launch.api_client.model.get_async_task_v1_response.GetAsyncTaskV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**status** | [**TaskStatus**](TaskStatus.md) | [**TaskStatus**](TaskStatus.md) | | +**result** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**traceback** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**status_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, 
decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetBatchCompletionV2Response.md b/docs/models/GetBatchCompletionV2Response.md new file mode 100644 index 00000000..59366755 --- /dev/null +++ b/docs/models/GetBatchCompletionV2Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.get_batch_completion_v2_response.GetBatchCompletionV2Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**job** | [**BatchCompletionsJob**](BatchCompletionsJob.md) | [**BatchCompletionsJob**](BatchCompletionsJob.md) | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetBatchJobV1Response.md b/docs/models/GetBatchJobV1Response.md new 
file mode 100644 index 00000000..d46aaa29 --- /dev/null +++ b/docs/models/GetBatchJobV1Response.md @@ -0,0 +1,19 @@ +# launch.api_client.model.get_batch_job_v1_response.GetBatchJobV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**duration** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**status** | [**BatchJobStatus**](BatchJobStatus.md) | [**BatchJobStatus**](BatchJobStatus.md) | | +**result** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_tasks_pending** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_tasks_completed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, 
datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetDockerImageBatchJobV1Response.md b/docs/models/GetDockerImageBatchJobV1Response.md new file mode 100644 index 00000000..69c63d3c --- /dev/null +++ b/docs/models/GetDockerImageBatchJobV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.get_docker_image_batch_job_v1_response.GetDockerImageBatchJobV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**status** | [**BatchJobStatus**](BatchJobStatus.md) | [**BatchJobStatus**](BatchJobStatus.md) | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetFileContentResponse.md 
b/docs/models/GetFileContentResponse.md new file mode 100644 index 00000000..aca9dab1 --- /dev/null +++ b/docs/models/GetFileContentResponse.md @@ -0,0 +1,18 @@ +# launch.api_client.model.get_file_content_response.GetFileContentResponse + +Response object for retrieving a file's content. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for retrieving a file's content. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the requested file. | +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | File content. 
| +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetFileResponse.md b/docs/models/GetFileResponse.md new file mode 100644 index 00000000..e7d7a480 --- /dev/null +++ b/docs/models/GetFileResponse.md @@ -0,0 +1,19 @@ +# launch.api_client.model.get_file_response.GetFileResponse + +Response object for retrieving a file. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for retrieving a file. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**filename** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | File name. | +**size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Length of the file, in characters. 
| +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the requested file. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetFineTuneEventsResponse.md b/docs/models/GetFineTuneEventsResponse.md new file mode 100644 index 00000000..e361c2b6 --- /dev/null +++ b/docs/models/GetFineTuneEventsResponse.md @@ -0,0 +1,15 @@ +# launch.api_client.model.get_fine_tune_events_response.GetFineTuneEventsResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**events** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, 
decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetFineTuneResponse.md b/docs/models/GetFineTuneResponse.md new file mode 100644 index 00000000..e5bcd2f3 --- /dev/null +++ b/docs/models/GetFineTuneResponse.md @@ -0,0 +1,17 @@ +# launch.api_client.model.get_fine_tune_response.GetFineTuneResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Unique ID of the fine tune | +**status** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Status of the requested fine tune. 
| +**fine_tuned_model** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name of the resulting fine-tuned model. This can be plugged into the Completion API once the fine-tune is complete | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetLLMModelEndpointV1Response.md b/docs/models/GetLLMModelEndpointV1Response.md new file mode 100644 index 00000000..c3151dc7 --- /dev/null +++ b/docs/models/GetLLMModelEndpointV1Response.md @@ -0,0 +1,26 @@ +# launch.api_client.model.get_llm_model_endpoint_v1_response.GetLLMModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**inference_framework** | [**LLMInferenceFramework**](LLMInferenceFramework.md) | [**LLMInferenceFramework**](LLMInferenceFramework.md) | | +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | +**status** | [**ModelEndpointStatus**](ModelEndpointStatus.md) | [**ModelEndpointStatus**](ModelEndpointStatus.md) | | +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**spec** | [**GetModelEndpointV1Response**](GetModelEndpointV1Response.md) | [**GetModelEndpointV1Response**](GetModelEndpointV1Response.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetModelEndpointV1Response.md b/docs/models/GetModelEndpointV1Response.md new file mode 100644 index 00000000..11477156 --- /dev/null +++ b/docs/models/GetModelEndpointV1Response.md @@ -0,0 +1,35 @@ +# launch.api_client.model.get_model_endpoint_v1_response.GetModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**endpoint_type** | [**ModelEndpointType**](ModelEndpointType.md) | [**ModelEndpointType**](ModelEndpointType.md) | | +**last_updated_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**destination** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**bundle_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_by** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**status** | [**ModelEndpointStatus**](ModelEndpointStatus.md) | [**ModelEndpointStatus**](ModelEndpointStatus.md) | | +**deployment_name** | dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**aws_role** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**results_s3_bucket** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**deployment_state** | [**ModelEndpointDeploymentState**](ModelEndpointDeploymentState.md) | [**ModelEndpointDeploymentState**](ModelEndpointDeploymentState.md) | | [optional] +**resource_state** | [**ModelEndpointResourceState**](ModelEndpointResourceState.md) | [**ModelEndpointResourceState**](ModelEndpointResourceState.md) | | [optional] +**num_queued_items** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GetTriggerV1Response.md b/docs/models/GetTriggerV1Response.md new file mode 100644 index 00000000..67616be5 --- /dev/null +++ b/docs/models/GetTriggerV1Response.md @@ -0,0 +1,23 @@ +# launch.api_client.model.get_trigger_v1_response.GetTriggerV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- 
+dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**owner** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**cron_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**docker_image_batch_job_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | +**created_by** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**default_job_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_job_metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/GpuType.md b/docs/models/GpuType.md new file mode 100644 index 00000000..2710e41e --- /dev/null +++ b/docs/models/GpuType.md @@ -0,0 +1,11 @@ +# launch.api_client.model.gpu_type.GpuType + +Lists allowed GPU types for Launch. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Lists allowed GPU types for Launch. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/HTTPValidationError.md b/docs/models/HTTPValidationError.md new file mode 100644 index 00000000..9b4bbdc9 --- /dev/null +++ b/docs/models/HTTPValidationError.md @@ -0,0 +1,15 @@ +# launch.api_client.model.http_validation_error.HTTPValidationError + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**detail** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | 
[optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ImageUrl.md b/docs/models/ImageUrl.md new file mode 100644 index 00000000..f3a8ae25 --- /dev/null +++ b/docs/models/ImageUrl.md @@ -0,0 +1,16 @@ +# launch.api_client.model.image_url.ImageUrl + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Either a URL of the image or the base64 encoded image data. | +**detail** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding). 
| [optional] must be one of ["auto", "low", "high", ] if omitted the server will use the default value of auto +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/InputAudio.md b/docs/models/InputAudio.md new file mode 100644 index 00000000..428ef818 --- /dev/null +++ b/docs/models/InputAudio.md @@ -0,0 +1,16 @@ +# launch.api_client.model.input_audio.InputAudio + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**data** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Base64 encoded audio data. | +**format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the encoded audio data. 
Currently supports \"wav\" and \"mp3\". | must be one of ["wav", "mp3", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/JsonSchema.md b/docs/models/JsonSchema.md new file mode 100644 index 00000000..c1d1acd6 --- /dev/null +++ b/docs/models/JsonSchema.md @@ -0,0 +1,18 @@ +# launch.api_client.model.json_schema.JsonSchema + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. 
| +**description** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A description of what the response format is for, used by the model to determine how to respond in the format. | [optional] +**schema** | [**ResponseFormatJsonSchemaSchema**](ResponseFormatJsonSchemaSchema.md) | [**ResponseFormatJsonSchemaSchema**](ResponseFormatJsonSchemaSchema.md) | | [optional] +**strict** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable strict schema adherence when generating the output. If set to true, the model will always follow the exact schema defined in the `schema` field. Only a subset of JSON Schema is supported when `strict` is `true`. To learn more, read the [Structured Outputs guide](/docs/guides/structured-outputs). 
| [optional] if omitted the server will use the default value of false +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/LLMFineTuneEvent.md b/docs/models/LLMFineTuneEvent.md new file mode 100644 index 00000000..a3dbcda8 --- /dev/null +++ b/docs/models/LLMFineTuneEvent.md @@ -0,0 +1,17 @@ +# launch.api_client.model.llm_fine_tune_event.LLMFineTuneEvent + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**level** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**message** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**timestamp** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/LLMInferenceFramework.md b/docs/models/LLMInferenceFramework.md new file mode 100644 index 00000000..897cabdd --- /dev/null +++ b/docs/models/LLMInferenceFramework.md @@ -0,0 +1,9 @@ +# launch.api_client.model.llm_inference_framework.LLMInferenceFramework + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/LLMSource.md b/docs/models/LLMSource.md new file mode 100644 index 00000000..7cbd03f2 --- /dev/null +++ b/docs/models/LLMSource.md @@ -0,0 +1,9 @@ +# launch.api_client.model.llm_source.LLMSource + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListDockerImageBatchJobBundleV1Response.md b/docs/models/ListDockerImageBatchJobBundleV1Response.md new file mode 100644 index 00000000..d1d5204f --- /dev/null +++ b/docs/models/ListDockerImageBatchJobBundleV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.list_docker_image_batch_job_bundle_v1_response.ListDockerImageBatchJobBundleV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**docker_image_batch_job_bundles** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) + diff --git a/docs/models/ListDockerImageBatchJobsV1Response.md b/docs/models/ListDockerImageBatchJobsV1Response.md new file mode 100644 index 00000000..4cb6a053 --- /dev/null +++ b/docs/models/ListDockerImageBatchJobsV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.list_docker_image_batch_jobs_v1_response.ListDockerImageBatchJobsV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**jobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListFilesResponse.md b/docs/models/ListFilesResponse.md new file mode 100644 index 00000000..50fb04f2 --- /dev/null +++ b/docs/models/ListFilesResponse.md @@ -0,0 +1,17 @@ +# launch.api_client.model.list_files_response.ListFilesResponse + +Response object for listing files. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for listing files. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**files** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | List of file IDs, names, and sizes. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListFineTunesResponse.md b/docs/models/ListFineTunesResponse.md new file mode 100644 index 00000000..2222ad09 --- /dev/null +++ b/docs/models/ListFineTunesResponse.md @@ -0,0 +1,15 @@ +# launch.api_client.model.list_fine_tunes_response.ListFineTunesResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**jobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListLLMModelEndpointsV1Response.md b/docs/models/ListLLMModelEndpointsV1Response.md new file mode 100644 index 00000000..9ebdfbb2 --- /dev/null +++ b/docs/models/ListLLMModelEndpointsV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.list_llm_model_endpoints_v1_response.ListLLMModelEndpointsV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_endpoints** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListModelBundlesV1Response.md b/docs/models/ListModelBundlesV1Response.md new file mode 100644 index 00000000..77f59123 --- /dev/null +++ b/docs/models/ListModelBundlesV1Response.md @@ -0,0 +1,17 @@ +# launch.api_client.model.list_model_bundles_v1_response.ListModelBundlesV1Response + +Response object for listing Model Bundles. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for listing Model Bundles. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_bundles** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListModelBundlesV2Response.md b/docs/models/ListModelBundlesV2Response.md new file mode 100644 index 00000000..19769683 --- /dev/null +++ b/docs/models/ListModelBundlesV2Response.md @@ -0,0 +1,17 @@ +# launch.api_client.model.list_model_bundles_v2_response.ListModelBundlesV2Response + +Response object for listing Model Bundles. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for listing Model Bundles. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_bundles** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListModelEndpointsV1Response.md b/docs/models/ListModelEndpointsV1Response.md new file mode 100644 index 00000000..e0c8f0a3 --- /dev/null +++ b/docs/models/ListModelEndpointsV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.list_model_endpoints_v1_response.ListModelEndpointsV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_endpoints** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ListTriggersV1Response.md b/docs/models/ListTriggersV1Response.md new file mode 100644 index 00000000..8da56be0 --- /dev/null +++ b/docs/models/ListTriggersV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.list_triggers_v1_response.ListTriggersV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**triggers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + 
+[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Logprobs.md b/docs/models/Logprobs.md new file mode 100644 index 00000000..afd996ab --- /dev/null +++ b/docs/models/Logprobs.md @@ -0,0 +1,16 @@ +# launch.api_client.model.logprobs.Logprobs + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**refusal** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of message refusal tokens with log probability information. | +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of message content tokens with log probability information. 
| +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Logprobs2.md b/docs/models/Logprobs2.md new file mode 100644 index 00000000..de11800c --- /dev/null +++ b/docs/models/Logprobs2.md @@ -0,0 +1,18 @@ +# launch.api_client.model.logprobs2.Logprobs2 + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**text_offset** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**token_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**top_logprobs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Metadata.md b/docs/models/Metadata.md new file mode 100644 index 00000000..ed7ad8fd --- /dev/null +++ b/docs/models/Metadata.md @@ -0,0 +1,14 @@ +# launch.api_client.model.metadata.Metadata + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + 
+[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelBundleEnvironmentParams.md b/docs/models/ModelBundleEnvironmentParams.md new file mode 100644 index 00000000..27a3168f --- /dev/null +++ b/docs/models/ModelBundleEnvironmentParams.md @@ -0,0 +1,21 @@ +# launch.api_client.model.model_bundle_environment_params.ModelBundleEnvironmentParams + +This is the entity-layer class for the Model Bundle environment parameters. Being an entity-layer class, it should be a plain data object. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle environment parameters. Being an entity-layer class, it should be a plain data object. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**framework_type** | [**ModelBundleFrameworkType**](ModelBundleFrameworkType.md) | [**ModelBundleFrameworkType**](ModelBundleFrameworkType.md) | | +**pytorch_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**tensorflow_version** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**ecr_repo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelBundleFrameworkType.md 
b/docs/models/ModelBundleFrameworkType.md new file mode 100644 index 00000000..432d5097 --- /dev/null +++ b/docs/models/ModelBundleFrameworkType.md @@ -0,0 +1,11 @@ +# launch.api_client.model.model_bundle_framework_type.ModelBundleFrameworkType + +The canonical list of possible machine learning frameworks of Model Bundles. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible machine learning frameworks of Model Bundles. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelBundleOrderBy.md b/docs/models/ModelBundleOrderBy.md new file mode 100644 index 00000000..846cb988 --- /dev/null +++ b/docs/models/ModelBundleOrderBy.md @@ -0,0 +1,11 @@ +# launch.api_client.model.model_bundle_order_by.ModelBundleOrderBy + +The canonical list of possible orderings of Model Bundles. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible orderings of Model Bundles. 
| + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelBundlePackagingType.md b/docs/models/ModelBundlePackagingType.md new file mode 100644 index 00000000..c3fbbfd1 --- /dev/null +++ b/docs/models/ModelBundlePackagingType.md @@ -0,0 +1,11 @@ +# launch.api_client.model.model_bundle_packaging_type.ModelBundlePackagingType + +The canonical list of possible packaging types for Model Bundles. These values broadly determine how the model endpoint will obtain its code & dependencies. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible packaging types for Model Bundles. These values broadly determine how the model endpoint will obtain its code & dependencies. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelBundleV1Response.md b/docs/models/ModelBundleV1Response.md new file mode 100644 index 00000000..a10e7905 --- /dev/null +++ b/docs/models/ModelBundleV1Response.md @@ -0,0 +1,27 @@ +# launch.api_client.model.model_bundle_v1_response.ModelBundleV1Response + +Response object for a single Model Bundle. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for a single Model Bundle. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_artifact_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**packaging_type** | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | [**ModelBundlePackagingType**](ModelBundlePackagingType.md) | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**env_params** | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | [**ModelBundleEnvironmentParams**](ModelBundleEnvironmentParams.md) | | +**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelBundleV2Response.md b/docs/models/ModelBundleV2Response.md new file mode 100644 index 00000000..25fb8517 --- /dev/null 
+++ b/docs/models/ModelBundleV2Response.md @@ -0,0 +1,23 @@ +# launch.api_client.model.model_bundle_v2_response.ModelBundleV2Response + +Response object for a single Model Bundle. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for a single Model Bundle. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_artifact_ids** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | value must conform to RFC-3339 date-time +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**schema_location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelDownloadRequest.md b/docs/models/ModelDownloadRequest.md new file mode 100644 index 00000000..1fe07b7c --- /dev/null +++ b/docs/models/ModelDownloadRequest.md @@ -0,0 +1,16 @@ +# launch.api_client.model.model_download_request.ModelDownloadRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- 
+**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name of the fine tuned model | +**download_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Format that you want the downloaded urls to be compatible with. Currently only supports hugging_face | [optional] if omitted the server will use the default value of hugging_face +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelDownloadResponse.md b/docs/models/ModelDownloadResponse.md new file mode 100644 index 00000000..88e55683 --- /dev/null +++ b/docs/models/ModelDownloadResponse.md @@ -0,0 +1,15 @@ +# launch.api_client.model.model_download_response.ModelDownloadResponse + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes 
+------------ | ------------- | ------------- | ------------- | ------------- +**urls** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary of (file_name, url) pairs to download the model from. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelEndpointDeploymentState.md b/docs/models/ModelEndpointDeploymentState.md new file mode 100644 index 00000000..a69334d1 --- /dev/null +++ b/docs/models/ModelEndpointDeploymentState.md @@ -0,0 +1,22 @@ +# launch.api_client.model.model_endpoint_deployment_state.ModelEndpointDeploymentState + +This is the entity-layer class for the deployment settings related to a Model Endpoint. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the deployment settings related to a Model Endpoint. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**available_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**unavailable_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, 
BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelEndpointOrderBy.md b/docs/models/ModelEndpointOrderBy.md new file mode 100644 index 00000000..23a43e36 --- /dev/null +++ b/docs/models/ModelEndpointOrderBy.md @@ -0,0 +1,11 @@ +# launch.api_client.model.model_endpoint_order_by.ModelEndpointOrderBy + +The canonical list of possible orderings of Model Endpoints. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The canonical list of possible orderings of Model Endpoints. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelEndpointResourceState.md b/docs/models/ModelEndpointResourceState.md new file mode 100644 index 00000000..127d153a --- /dev/null +++ b/docs/models/ModelEndpointResourceState.md @@ -0,0 +1,23 @@ +# launch.api_client.model.model_endpoint_resource_state.ModelEndpointResourceState + +This is the entity-layer class for the resource settings per worker of a Model Endpoint. Note: in the multinode case, there are multiple \"nodes\" per \"worker\". \"Nodes\" is analogous to a single k8s pod that may take up all the GPUs on a single machine.
\"Workers\" is the smallest unit that a request can be made to, and consists of one leader \"node\" and multiple follower \"nodes\" (named \"worker\" in the k8s LeaderWorkerSet definition). cpus/gpus/memory/storage are per-node, thus the total consumption by a \"worker\" is cpus/gpus/etc. multiplied by nodes_per_worker. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the resource settings per worker of a Model Endpoint. Note: in the multinode case, there are multiple \"nodes\" per \"worker\". \"Nodes\" is analogous to a single k8s pod that may take up all the GPUs on a single machine. \"Workers\" is the smallest unit that a request can be made to, and consists of one leader \"node\" and multiple follower \"nodes\" (named \"worker\" in the k8s LeaderWorkerSet definition). cpus/gpus/memory/storage are per-node, thus the total consumption by a \"worker\" is cpus/gpus/etc. multiplied by nodes_per_worker. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelEndpointStatus.md b/docs/models/ModelEndpointStatus.md new file mode 100644 index 00000000..5fbb86e3 --- /dev/null +++ b/docs/models/ModelEndpointStatus.md @@ -0,0 +1,9 @@ +# launch.api_client.model.model_endpoint_status.ModelEndpointStatus + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ModelEndpointType.md b/docs/models/ModelEndpointType.md new file mode 100644 index 00000000..7b4ef324 --- /dev/null +++ b/docs/models/ModelEndpointType.md @@ -0,0 +1,9 @@ +# launch.api_client.model.model_endpoint_type.ModelEndpointType + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to 
README]](../../README.md) + diff --git a/docs/models/ParallelToolCalls.md b/docs/models/ParallelToolCalls.md new file mode 100644 index 00000000..6e1a6e56 --- /dev/null +++ b/docs/models/ParallelToolCalls.md @@ -0,0 +1,11 @@ +# launch.api_client.model.parallel_tool_calls.ParallelToolCalls + +Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/PredictionContent.md b/docs/models/PredictionContent.md new file mode 100644 index 00000000..ee67de3c --- /dev/null +++ b/docs/models/PredictionContent.md @@ -0,0 +1,16 @@ +# launch.api_client.model.prediction_content.PredictionContent + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of the predicted content you want to provide. This type is currently always `content`. | must be one of ["content", ] +**content** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Prompt.md b/docs/models/Prompt.md new file mode 100644 index 00000000..8386200e --- /dev/null +++ b/docs/models/Prompt.md @@ -0,0 +1,11 @@ +# launch.api_client.model.prompt.Prompt + +The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | if omitted the server will use the default value of <|endoftext|> + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Prompt1.md b/docs/models/Prompt1.md new file mode 100644 index 00000000..60a7c0c4 --- /dev/null +++ b/docs/models/Prompt1.md @@ -0,0 +1,11 @@ +# launch.api_client.model.prompt1.Prompt1 + +The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. 
Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. | if omitted the server will use the default value of <|endoftext|> + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Prompt1Item.md b/docs/models/Prompt1Item.md new file mode 100644 index 00000000..e52fe744 --- /dev/null +++ b/docs/models/Prompt1Item.md @@ -0,0 +1,9 @@ +# launch.api_client.model.prompt1_item.Prompt1Item + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/PromptTokensDetails.md b/docs/models/PromptTokensDetails.md new file mode 100644 index 00000000..5c8aa2e9 --- /dev/null +++ b/docs/models/PromptTokensDetails.md @@ -0,0 +1,16 @@ +# launch.api_client.model.prompt_tokens_details.PromptTokensDetails + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | 
------------- | ------------- +**audio_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Audio input tokens present in the prompt. | [optional] if omitted the server will use the default value of 0 +**cached_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Cached tokens present in the prompt. | [optional] if omitted the server will use the default value of 0 +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/PytorchFramework.md b/docs/models/PytorchFramework.md new file mode 100644 index 00000000..a41c6bf7 --- /dev/null +++ b/docs/models/PytorchFramework.md @@ -0,0 +1,18 @@ +# launch.api_client.model.pytorch_framework.PytorchFramework + +This is the entity-layer class for a Pytorch framework specification. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a Pytorch framework specification. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**pytorch_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**framework_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["pytorch", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/Quantization.md b/docs/models/Quantization.md new file mode 100644 index 00000000..c8518147 --- /dev/null +++ b/docs/models/Quantization.md @@ -0,0 +1,9 @@ +# launch.api_client.model.quantization.Quantization + +## Model Type Info +Input Type | Accessed Type | Description | Notes 
+------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ReasoningEffort.md b/docs/models/ReasoningEffort.md new file mode 100644 index 00000000..80470a58 --- /dev/null +++ b/docs/models/ReasoningEffort.md @@ -0,0 +1,11 @@ +# launch.api_client.model.reasoning_effort.ReasoningEffort + +**o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | **o-series models only** Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently supported values are `low`, `medium`, and `high`. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response. 
| if omitted the server will use the default value of medium + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/RequestSchema.md b/docs/models/RequestSchema.md new file mode 100644 index 00000000..94ff7eb4 --- /dev/null +++ b/docs/models/RequestSchema.md @@ -0,0 +1,9 @@ +# launch.api_client.model.request_schema.RequestSchema + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ResponseFormatJsonObject.md b/docs/models/ResponseFormatJsonObject.md new file mode 100644 index 00000000..0bde48f3 --- /dev/null +++ b/docs/models/ResponseFormatJsonObject.md @@ -0,0 +1,15 @@ +# launch.api_client.model.response_format_json_object.ResponseFormatJsonObject + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of response format being defined. Always `json_object`. | must be one of ["json_object", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ResponseFormatJsonSchema.md b/docs/models/ResponseFormatJsonSchema.md new file mode 100644 index 00000000..f8b99f8c --- /dev/null +++ b/docs/models/ResponseFormatJsonSchema.md @@ -0,0 +1,16 @@ +# launch.api_client.model.response_format_json_schema.ResponseFormatJsonSchema + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**json_schema** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Structured Outputs configuration options, including a JSON Schema. 
| +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of response format being defined. Always `json_schema`. | must be one of ["json_schema", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ResponseFormatJsonSchemaSchema.md b/docs/models/ResponseFormatJsonSchemaSchema.md new file mode 100644 index 00000000..2d4fd497 --- /dev/null +++ b/docs/models/ResponseFormatJsonSchemaSchema.md @@ -0,0 +1,14 @@ +# launch.api_client.model.response_format_json_schema_schema.ResponseFormatJsonSchemaSchema + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, 
FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ResponseFormatText.md b/docs/models/ResponseFormatText.md new file mode 100644 index 00000000..5ab6cd35 --- /dev/null +++ b/docs/models/ResponseFormatText.md @@ -0,0 +1,15 @@ +# launch.api_client.model.response_format_text.ResponseFormatText + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of response format being defined. Always `text`. 
| must be one of ["text", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ResponseModalities.md b/docs/models/ResponseModalities.md new file mode 100644 index 00000000..cd0d308d --- /dev/null +++ b/docs/models/ResponseModalities.md @@ -0,0 +1,11 @@ +# launch.api_client.model.response_modalities.ResponseModalities + +Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Output types that you would like the model to generate. Most models are capable of generating text, which is the default: `[\"text\"]` The `gpt-4o-audio-preview` model can also be used to [generate audio](/docs/guides/audio). 
To request that this model generate both text and audio responses, you can use: `[\"text\", \"audio\"]` | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ResponseSchema.md b/docs/models/ResponseSchema.md new file mode 100644 index 00000000..59b49aea --- /dev/null +++ b/docs/models/ResponseSchema.md @@ -0,0 +1,9 @@ +# launch.api_client.model.response_schema.ResponseSchema + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/RestartModelEndpointV1Response.md b/docs/models/RestartModelEndpointV1Response.md new file mode 100644 index 00000000..af9200e2 --- /dev/null +++ b/docs/models/RestartModelEndpointV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.restart_model_endpoint_v1_response.RestartModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**restarted** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/RunnableImageFlavor.md b/docs/models/RunnableImageFlavor.md new file mode 100644 index 00000000..1ef098b7 --- /dev/null +++ b/docs/models/RunnableImageFlavor.md @@ -0,0 +1,30 @@ +# launch.api_client.model.runnable_image_flavor.RunnableImageFlavor + +This is the entity-layer class for the Model Bundle flavor of a runnable image. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle flavor of a runnable image. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["runnable_image", ] +**protocol** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["http", ] +**tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /predict +**healthcheck_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /readyz +**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 120 +**extra_routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**forwarder_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of default +**worker_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] 
+**worker_env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ServiceTier.md b/docs/models/ServiceTier.md new file mode 100644 index 00000000..2c1ac1ba --- /dev/null +++ b/docs/models/ServiceTier.md @@ -0,0 +1,11 @@ +# launch.api_client.model.service_tier.ServiceTier + +Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized.
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: - If set to 'auto', and the Project is Scale tier enabled, the system will utilize scale tier credits until they are exhausted. - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee. - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). - When not set, the default behavior is 'auto'. When this parameter is set, the response body will include the `service_tier` utilized. | if omitted the server will use the default value of auto + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/StopConfiguration.md b/docs/models/StopConfiguration.md new file mode 100644 index 00000000..5efb0007 --- /dev/null +++ b/docs/models/StopConfiguration.md @@ -0,0 +1,11 @@ +# launch.api_client.model.stop_configuration.StopConfiguration + +Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/StopConfiguration1.md b/docs/models/StopConfiguration1.md new file mode 100644 index 00000000..98464186 --- /dev/null +++ b/docs/models/StopConfiguration1.md @@ -0,0 +1,11 @@ +# launch.api_client.model.stop_configuration1.StopConfiguration1 + +Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Not supported with latest reasoning models `o3` and `o4-mini`. Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. 
| + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/StreamError.md b/docs/models/StreamError.md new file mode 100644 index 00000000..4ba929e1 --- /dev/null +++ b/docs/models/StreamError.md @@ -0,0 +1,18 @@ +# launch.api_client.model.stream_error.StreamError + +Error object for a stream prompt completion task. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Error object for a stream prompt completion task. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**status_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**content** | [**StreamErrorContent**](StreamErrorContent.md) | [**StreamErrorContent**](StreamErrorContent.md) | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/StreamErrorContent.md 
b/docs/models/StreamErrorContent.md new file mode 100644 index 00000000..4f7f0323 --- /dev/null +++ b/docs/models/StreamErrorContent.md @@ -0,0 +1,16 @@ +# launch.api_client.model.stream_error_content.StreamErrorContent + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**error** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**timestamp** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/StreamingEnhancedRunnableImageFlavor.md b/docs/models/StreamingEnhancedRunnableImageFlavor.md new file mode 100644 index 00000000..b9a67753 --- /dev/null +++ 
b/docs/models/StreamingEnhancedRunnableImageFlavor.md @@ -0,0 +1,32 @@ +# launch.api_client.model.streaming_enhanced_runnable_image_flavor.StreamingEnhancedRunnableImageFlavor + +For deployments that expose a streaming route in a container. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | For deployments that expose a streaming route in a container. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["streaming_enhanced_runnable_image", ] +**protocol** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["http", ] +**tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**streaming_command** | dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of [] +**predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /predict +**healthcheck_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /readyz +**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 120 +**extra_routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**forwarder_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of default +**worker_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**worker_env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**streaming_predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /stream +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + 
+[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/SyncEndpointPredictV1Request.md b/docs/models/SyncEndpointPredictV1Request.md new file mode 100644 index 00000000..96e31f9b --- /dev/null +++ b/docs/models/SyncEndpointPredictV1Request.md @@ -0,0 +1,23 @@ +# launch.api_client.model.sync_endpoint_predict_v1_request.SyncEndpointPredictV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cloudpickle** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**return_pickled** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**destination_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**timeout_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_retries** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API 
list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/SyncEndpointPredictV1Response.md b/docs/models/SyncEndpointPredictV1Response.md new file mode 100644 index 00000000..3c0779e1 --- /dev/null +++ b/docs/models/SyncEndpointPredictV1Response.md @@ -0,0 +1,18 @@ +# launch.api_client.model.sync_endpoint_predict_v1_response.SyncEndpointPredictV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**status** | [**TaskStatus**](TaskStatus.md) | [**TaskStatus**](TaskStatus.md) | | +**result** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**traceback** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**status_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/TaskStatus.md b/docs/models/TaskStatus.md new file mode 100644 index 00000000..69a16961 --- /dev/null +++ b/docs/models/TaskStatus.md @@ -0,0 +1,9 @@ +# launch.api_client.model.task_status.TaskStatus + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/TensorflowFramework.md b/docs/models/TensorflowFramework.md new file mode 100644 index 00000000..2c40ef1a --- /dev/null +++ b/docs/models/TensorflowFramework.md @@ -0,0 +1,18 @@ +# launch.api_client.model.tensorflow_framework.TensorflowFramework + +This is the entity-layer class for a Tensorflow framework specification. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for a Tensorflow framework specification. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**tensorflow_version** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**framework_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["tensorflow", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/TokenOutput.md b/docs/models/TokenOutput.md new file mode 100644 index 00000000..14acb368 --- /dev/null +++ b/docs/models/TokenOutput.md @@ -0,0 +1,18 @@ +# launch.api_client.model.token_output.TokenOutput + +Detailed token information. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Detailed token information. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**log_prob** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**token** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ToolConfig.md b/docs/models/ToolConfig.md new file mode 100644 index 00000000..d9c98248 --- /dev/null +++ b/docs/models/ToolConfig.md @@ -0,0 +1,20 @@ +# launch.api_client.model.tool_config.ToolConfig + +Configuration for tool use. NOTE: this config is highly experimental and signature will change significantly in future iterations. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Configuration for tool use. 
NOTE: this config is highly experimental and signature will change significantly in future iterations. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**max_iterations** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 10 +**execution_timeout_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 60 +**should_retry_on_error** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API 
list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/TopLogprob.md b/docs/models/TopLogprob.md new file mode 100644 index 00000000..250b4c3f --- /dev/null +++ b/docs/models/TopLogprob.md @@ -0,0 +1,17 @@ +# launch.api_client.model.top_logprob.TopLogprob + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**logprob** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. | +**bytes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. 
| +**token** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The token. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/TritonEnhancedRunnableImageFlavor.md b/docs/models/TritonEnhancedRunnableImageFlavor.md new file mode 100644 index 00000000..79bd1687 --- /dev/null +++ b/docs/models/TritonEnhancedRunnableImageFlavor.md @@ -0,0 +1,37 @@ +# launch.api_client.model.triton_enhanced_runnable_image_flavor.TritonEnhancedRunnableImageFlavor + +For deployments that require tritonserver running in a container. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | For deployments that require tritonserver running in a container. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["triton_enhanced_runnable_image", ] +**protocol** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["http", ] +**tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**triton_commit_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**triton_model_repository** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**triton_num_cpu** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**predict_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /predict +**healthcheck_route** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of /readyz +**env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**readiness_initial_delay_seconds** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 120 +**extra_routes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**routes** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**forwarder_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of default +**worker_command** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**worker_env** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**triton_model_replicas** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**triton_storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**triton_memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**triton_readiness_initial_delay_seconds** | 
dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of 300 +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateBatchCompletionsV2Request.md b/docs/models/UpdateBatchCompletionsV2Request.md new file mode 100644 index 00000000..4a93861c --- /dev/null +++ b/docs/models/UpdateBatchCompletionsV2Request.md @@ -0,0 +1,16 @@ +# launch.api_client.model.update_batch_completions_v2_request.UpdateBatchCompletionsV2Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the batch completions job | 
+**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateBatchCompletionsV2Response.md b/docs/models/UpdateBatchCompletionsV2Response.md new file mode 100644 index 00000000..46b333b3 --- /dev/null +++ b/docs/models/UpdateBatchCompletionsV2Response.md @@ -0,0 +1,25 @@ +# launch.api_client.model.update_batch_completions_v2_response.UpdateBatchCompletionsV2Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**completed_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**metadata** | 
dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**expires_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**model_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model configuration for the batch inference. Hardware configurations are inferred. | +**job_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether the update was successful | +**created_at** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**output_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the output file. 
The output file will be a JSON file of type List[CompletionOutput]. | +**status** | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | [**BatchCompletionsJobStatus**](BatchCompletionsJobStatus.md) | | +**input_data_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent]. | [optional] +**priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Priority of the batch inference job. Default to None. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateBatchJobV1Request.md b/docs/models/UpdateBatchJobV1Request.md new file mode 100644 index 00000000..598ec313 --- /dev/null +++ b/docs/models/UpdateBatchJobV1Request.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_batch_job_v1_request.UpdateBatchJobV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**cancel** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateBatchJobV1Response.md b/docs/models/UpdateBatchJobV1Response.md new file mode 100644 index 00000000..98c8a03c --- /dev/null +++ b/docs/models/UpdateBatchJobV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_batch_job_v1_response.UpdateBatchJobV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateDeepSpeedModelEndpointRequest.md b/docs/models/UpdateDeepSpeedModelEndpointRequest.md new file mode 100644 index 00000000..db8ca526 --- /dev/null +++ b/docs/models/UpdateDeepSpeedModelEndpointRequest.md @@ -0,0 +1,43 @@ +# launch.api_client.model.update_deep_speed_model_endpoint_request.UpdateDeepSpeedModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, 
str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this 
endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["deepspeed", ] if omitted the server will use the default value of deepspeed +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, 
None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model 
list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateDockerImageBatchJobV1Request.md b/docs/models/UpdateDockerImageBatchJobV1Request.md new file mode 100644 index 00000000..275be51b --- /dev/null +++ b/docs/models/UpdateDockerImageBatchJobV1Request.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_docker_image_batch_job_v1_request.UpdateDockerImageBatchJobV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**cancel** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateDockerImageBatchJobV1Response.md b/docs/models/UpdateDockerImageBatchJobV1Response.md new file mode 100644 index 00000000..83e0b3ce --- /dev/null +++ 
b/docs/models/UpdateDockerImageBatchJobV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_docker_image_batch_job_v1_response.UpdateDockerImageBatchJobV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateLLMModelEndpointV1Request.md b/docs/models/UpdateLLMModelEndpointV1Request.md new file mode 100644 index 00000000..8a540084 --- /dev/null +++ b/docs/models/UpdateLLMModelEndpointV1Request.md @@ -0,0 +1,9 @@ +# launch.api_client.model.update_llm_model_endpoint_v1_request.UpdateLLMModelEndpointV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateLLMModelEndpointV1Response.md b/docs/models/UpdateLLMModelEndpointV1Response.md new file mode 100644 index 00000000..eb32cf92 --- /dev/null +++ b/docs/models/UpdateLLMModelEndpointV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_llm_model_endpoint_v1_response.UpdateLLMModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API 
list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateModelEndpointV1Request.md b/docs/models/UpdateModelEndpointV1Request.md new file mode 100644 index 00000000..40785371 --- /dev/null +++ b/docs/models/UpdateModelEndpointV1Request.md @@ -0,0 +1,34 @@ +# launch.api_client.model.update_model_endpoint_v1_request.UpdateModelEndpointV1Request + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**model_bundle_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, 
BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**concurrent_requests_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, 
NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateModelEndpointV1Response.md b/docs/models/UpdateModelEndpointV1Response.md new file mode 100644 index 00000000..52afaefe --- /dev/null +++ b/docs/models/UpdateModelEndpointV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_model_endpoint_v1_response.UpdateModelEndpointV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**endpoint_creation_task_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | 
+**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateSGLangModelEndpointRequest.md b/docs/models/UpdateSGLangModelEndpointRequest.md new file mode 100644 index 00000000..332f60b6 --- /dev/null +++ b/docs/models/UpdateSGLangModelEndpointRequest.md @@ -0,0 +1,131 @@ +# launch.api_client.model.update_sg_lang_model_endpoint_request.UpdateSGLangModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["sglang", ] if omitted the server will use the default value of sglang +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, 
bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from the Hugging Face Hub. 
This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Defaults to False. | [optional] if omitted the server will use the default value of false +**tp_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The tensor parallel size. | [optional] +**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If set, skip init tokenizer and pass input_ids in generate request | [optional] +**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. | [optional] +**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. | [optional] +**kv_cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. \"auto\" will use model data type. 
| [optional] +**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to the JSON file containing the KV cache scaling factors. | [optional] +**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The quantization method. | [optional] +**context_length** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model's maximum context length. | [optional] +**device** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The device type. | [optional] +**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Override the model name returned by the v1/models endpoint in OpenAI API server. | [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The builtin chat template name or path of the chat template file. 
| [optional] +**is_embedding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to use a CausalLM as an embedding model. | [optional] +**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. | [optional] +**mem_fraction_static** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The fraction of the memory used for static allocation. | [optional] +**max_running_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of running requests. | [optional] +**max_total_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in the memory pool. | [optional] +**chunked_prefill_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a chunk for the chunked prefill. 
| [optional] +**max_prefill_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The maximum number of tokens in a prefill batch. | [optional] +**schedule_policy** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The scheduling policy of the requests. | [optional] +**schedule_conservativeness** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How conservative the schedule policy is. | [optional] +**cpu_offload_gb** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | How many GBs of RAM to reserve for CPU offloading. | [optional] +**prefill_only_one_req** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, only one request is prefilled per prefill batch. | [optional] +**stream_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The interval for streaming in terms of the token length. 
| [optional] +**random_seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The random seed. | [optional] +**constrained_json_whitespace_pattern** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Regex pattern for syntactic whitespaces allowed in JSON constrained output. | [optional] +**watchdog_timeout** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set watchdog timeout in seconds. | [optional] +**download_dir** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model download directory. | [optional] +**base_gpu_id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The base GPU ID to start allocating GPUs from. | [optional] +**log_level** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of all loggers. 
| [optional] +**log_level_http** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The logging level of HTTP server. | [optional] +**log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Log the inputs and outputs of all requests. | [optional] +**show_time_cost** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Show time cost of custom marks. | [optional] +**enable_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable log prometheus metrics. | [optional] +**decode_log_interval** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The log interval of decode batch. | [optional] +**api_key** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set API key of the server. 
| [optional] +**file_storage_pth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the file storage in backend. | [optional] +**enable_cache_report** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Return number of cached tokens in usage.prompt_tokens_details. | [optional] +**data_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The data parallelism size. | [optional] +**load_balance_method** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The load balancing strategy for data parallelism. | [optional] +**expert_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The expert parallelism size. | [optional] +**dist_init_addr** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The host address for initializing distributed backend. 
| [optional] +**nnodes** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of nodes. | [optional] +**node_rank** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The node rank. | [optional] +**json_model_override_args** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A dictionary in JSON string format used to override default model configurations. | [optional] +**lora_paths** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The list of LoRA adapters. | [optional] +**max_loras_per_batch** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of adapters for a running batch. | [optional] +**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for attention layers. 
| [optional] +**sampling_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the kernels for sampling layers. | [optional] +**grammar_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Choose the backend for grammar-guided decoding. | [optional] +**speculative_algorithm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Speculative algorithm. | [optional] +**speculative_draft_model_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the draft model weights. | [optional] +**speculative_num_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of steps sampled from draft model in Speculative Decoding. 
| [optional] +**speculative_num_draft_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of tokens sampled from draft model in Speculative Decoding. | [optional] +**speculative_eagle_topk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of tokens sampled from draft model in eagle2 each step. | [optional] +**enable_double_sparsity** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable double sparsity attention | [optional] +**ds_channel_config_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The path of the double sparsity channel config | [optional] +**ds_heavy_channel_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of heavy channels in double sparsity attention | [optional] +**ds_heavy_token_num** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The
number of heavy tokens in double sparsity attention | [optional] +**ds_heavy_channel_type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of heavy channels in double sparsity attention | [optional] +**ds_sparse_decode_threshold** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The threshold for sparse decoding in double sparsity attention | [optional] +**disable_radix_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable RadixAttention for prefix caching. | [optional] +**disable_jump_forward** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable jump-forward for grammar-guided decoding. | [optional] +**disable_cuda_graph** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph. 
| [optional] +**disable_cuda_graph_padding** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable cuda graph when padding is needed. | [optional] +**disable_outlines_disk_cache** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable disk cache of outlines. | [optional] +**disable_custom_all_reduce** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the custom all-reduce kernel. | [optional] +**disable_mla** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable Multi-head Latent Attention (MLA) for DeepSeek-V2. | [optional] +**disable_overlap_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable the overlap scheduler. | [optional] +**enable_mixed_chunk** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable mixing prefill and decode in a batch when using chunked prefill. 
| [optional] +**enable_dp_attention** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable data parallelism for attention and tensor parallelism for FFN. | [optional] +**enable_ep_moe** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable expert parallelism for moe. | [optional] +**enable_torch_compile** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torch.compile. | [optional] +**torch_compile_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size when using torch compile. | [optional] +**cuda_graph_max_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the maximum batch size for cuda graph. | [optional] +**cuda_graph_bs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Set the list of batch sizes for cuda graph. 
| [optional] +**torchao_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Optimize the model with torchao. | [optional] +**enable_nan_detection** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable the NaN detection for debugging purposes. | [optional] +**enable_p2p_check** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable P2P check for GPU access. | [optional] +**triton_attention_reduce_in_fp32** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Cast the intermediate attention results to fp32. | [optional] +**triton_attention_num_kv_splits** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The number of KV splits in flash decoding Triton kernel. 
| [optional] +**num_continuous_decode_steps** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Run multiple continuous decoding steps to reduce scheduling overhead. | [optional] +**delete_ckpt_after_loading** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Delete the model checkpoint after loading the model. | [optional] +**enable_memory_saver** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow saving memory using release_memory_occupation and resume_memory_occupation | [optional] +**allow_auto_truncate** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Allow automatically truncating requests that exceed the maximum input length. | [optional] +**enable_custom_logit_processor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable users to pass custom logit processors to the server. 
| [optional] +**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Specify the parser for handling tool-call interactions. | [optional] +**huggingface_repo** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The Hugging Face repository ID. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md b/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md new file mode 100644 index 00000000..4e274db2 --- /dev/null +++ b/docs/models/UpdateTextGenerationInferenceModelEndpointRequest.md @@ -0,0 +1,43 @@ +# launch.api_client.model.update_text_generation_inference_model_endpoint_request.UpdateTextGenerationInferenceModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary 
Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | 
dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, 
datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. 
| [optional] if omitted the server will use the default value of false +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["text_generation_inference", ] if omitted the server will use the default value of text_generation_inference +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server 
will use the default value of false +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateTriggerV1Request.md b/docs/models/UpdateTriggerV1Request.md new file mode 100644 index 00000000..f3fd0c9d --- /dev/null +++ b/docs/models/UpdateTriggerV1Request.md @@ -0,0 +1,16 @@ +# launch.api_client.model.update_trigger_v1_request.UpdateTriggerV1Request + +## Model Type Info +Input Type | Accessed Type | Description | 
Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**cron_schedule** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**suspend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateTriggerV1Response.md b/docs/models/UpdateTriggerV1Response.md new file mode 100644 index 00000000..9209cc28 --- /dev/null +++ b/docs/models/UpdateTriggerV1Response.md @@ -0,0 +1,15 @@ +# launch.api_client.model.update_trigger_v1_response.UpdateTriggerV1Response + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**success** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UpdateVLLMModelEndpointRequest.md b/docs/models/UpdateVLLMModelEndpointRequest.md new file mode 100644 index 00000000..e1a43566 --- /dev/null +++ b/docs/models/UpdateVLLMModelEndpointRequest.md @@ -0,0 +1,81 @@ +# launch.api_client.model.update_vllm_model_endpoint_request.UpdateVLLMModelEndpointRequest + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | 
------------- +**quantize** | [**Quantization**](Quantization.md) | [**Quantization**](Quantization.md) | | [optional] +**checkpoint_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**post_inference_hooks** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**cpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpus** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**memory** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**gpu_type** | [**GpuType**](GpuType.md) | [**GpuType**](GpuType.md) | | [optional] +**storage** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**nodes_per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**optimize_costs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**prewarm** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**high_priority** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**billing_tags** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**default_callback_auth** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**public_inference** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | 
frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of true +**chat_template_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**enable_startup_metrics** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases. | [optional] if omitted the server will use the default value of false +**model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**source** | [**LLMSource**](LLMSource.md) | [**LLMSource**](LLMSource.md) | | [optional] +**inference_framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] must be one of ["vllm", ] if omitted the server will use the default value of vllm +**inference_framework_image_tag** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**num_shards** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**metadata** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**force_bundle_recreation** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] if omitted the server will use the default value of false +**min_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_workers** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**per_worker** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**labels** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, 
decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**max_gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum GPU memory utilization for the batch inference. Default to 90%. Deprecated in favor of specifying this in VLLMModelConfig | [optional] +**attention_backend** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Attention backend to use for vLLM. Default to None. | [optional] +**max_model_len** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Model context length, If unspecified, will be automatically derived from the model config | [optional] +**max_num_seqs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of sequences per iteration | [optional] +**enforce_eager** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Always use eager-mode PyTorch. 
If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility | [optional] +**trust_remote_code** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to trust remote code from Hugging Face Hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False. | [optional] if omitted the server will use the default value of false +**pipeline_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of pipeline stages. Default to None. | [optional] +**tensor_parallel_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of tensor parallel replicas. Default to None. | [optional] +**quantization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights. 
| [optional] +**disable_log_requests** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Disable logging requests. Default to None. | [optional] +**chat_template** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint | [optional] +**tool_call_parser** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tool call parser | [optional] +**enable_auto_tool_choice** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enable auto tool choice | [optional] +**load_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The format of the model weights to load. * \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available. * \"pt\" will load the weights in the pytorch bin format. * \"safetensors\" will load the weights in the safetensors format. 
* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading. * \"dummy\" will initialize the weights with random values, which is mainly for profiling. * \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information. * \"bitsandbytes\" will load the weights using bitsandbytes quantization. | [optional] +**config_format** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'. | [optional] +**tokenizer_mode** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Tokenizer mode. 'auto' will use the fast tokenizer if available, 'slow' will always use the slow tokenizer, and 'mistral' will always use the tokenizer from `mistral_common`. | [optional] +**limit_mm_per_prompt** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of data instances per modality per prompt. Only applicable for multimodal models. 
| [optional] +**max_num_batched_tokens** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum number of batched tokens per iteration | [optional] +**tokenizer** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Name or path of the huggingface tokenizer to use. | [optional] +**dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models. | [optional] +**seed** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Random seed for reproducibility. | [optional] +**revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. 
| [optional] +**code_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**rope_scaling** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Dictionary containing the scaling configuration for the RoPE embeddings. When using this flag, don't update `max_position_embeddings` to the expected new maximum. | [optional] +**tokenizer_revision** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. | [optional] +**quantization_param_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm. 
| [optional] +**max_seq_len_to_capture** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode. | [optional] +**disable_sliding_window** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored. | [optional] +**skip_tokenizer_init** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | If true, skip initialization of tokenizer and detokenizer. | [optional] +**served_model_name** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. If not specified, the model name will be the same as `model`. 
| [optional] +**override_neuron_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments. | [optional] +**mm_processor_kwargs** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. | [optional] +**block_size** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of a cache block in number of tokens. | [optional] +**gpu_memory_utilization** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Fraction of GPU memory to use for the vLLM execution. | [optional] +**swap_space** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Size of the CPU swap space per GPU (in GiB). 
| [optional] +**cache_dtype** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Data type for kv cache storage. | [optional] +**num_gpu_blocks_override** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. | [optional] +**enable_prefix_caching** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Enables automatic prefix caching. | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UploadFileResponse.md b/docs/models/UploadFileResponse.md new file mode 100644 index 00000000..3dd7eefc --- /dev/null +++ b/docs/models/UploadFileResponse.md @@ -0,0 +1,17 @@ +# launch.api_client.model.upload_file_response.UploadFileResponse + +Response object for uploading a file. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Response object for uploading a file. | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**id** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | ID of the uploaded file. | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UrlCitation.md b/docs/models/UrlCitation.md new file mode 100644 index 00000000..b869f9e1 --- /dev/null +++ b/docs/models/UrlCitation.md @@ -0,0 +1,18 @@ +# launch.api_client.model.url_citation.UrlCitation + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary 
Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**start_index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the first character of the URL citation in the message. | +**end_index** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The index of the last character of the URL citation in the message. | +**title** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The title of the web resource. | +**url** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The URL of the web resource. 
| +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/UserLocation.md b/docs/models/UserLocation.md new file mode 100644 index 00000000..f0323db4 --- /dev/null +++ b/docs/models/UserLocation.md @@ -0,0 +1,16 @@ +# launch.api_client.model.user_location.UserLocation + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**approximate** | [**WebSearchLocation**](WebSearchLocation.md) | [**WebSearchLocation**](WebSearchLocation.md) | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The type of location approximation. Always `approximate`. 
| must be one of ["approximate", ] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ValidationError.md b/docs/models/ValidationError.md new file mode 100644 index 00000000..1d560469 --- /dev/null +++ b/docs/models/ValidationError.md @@ -0,0 +1,17 @@ +# launch.api_client.model.validation_error.ValidationError + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**msg** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**loc** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**type** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/VoiceIdsShared.md b/docs/models/VoiceIdsShared.md new file mode 100644 index 00000000..b0a19a72 --- /dev/null +++ b/docs/models/VoiceIdsShared.md @@ -0,0 +1,9 @@ +# launch.api_client.model.voice_ids_shared.VoiceIdsShared + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/WebSearchContextSize.md b/docs/models/WebSearchContextSize.md new file mode 100644 index 00000000..c19ce47b --- /dev/null +++ b/docs/models/WebSearchContextSize.md @@ -0,0 +1,11 @@ +# launch.api_client.model.web_search_context_size.WebSearchContextSize + +High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default. 
+ +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default. | + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/WebSearchLocation.md b/docs/models/WebSearchLocation.md new file mode 100644 index 00000000..2ea05b5b --- /dev/null +++ b/docs/models/WebSearchLocation.md @@ -0,0 +1,18 @@ +# launch.api_client.model.web_search_location.WebSearchLocation + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**country** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user, e.g. `US`. 
| [optional] +**region** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Free text input for the region of the user, e.g. `California`. | [optional] +**city** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | Free text input for the city of the user, e.g. `San Francisco`. | [optional] +**timezone** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user, e.g. `America/Los_Angeles`. 
| [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/WebSearchOptions.md b/docs/models/WebSearchOptions.md new file mode 100644 index 00000000..9e79c6be --- /dev/null +++ b/docs/models/WebSearchOptions.md @@ -0,0 +1,16 @@ +# launch.api_client.model.web_search_options.WebSearchOptions + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**user_location** | [**UserLocation**](UserLocation.md) | [**UserLocation**](UserLocation.md) | Approximate location parameters for the search. 
| [optional] +**search_context_size** | [**WebSearchContextSize**](WebSearchContextSize.md) | [**WebSearchContextSize**](WebSearchContextSize.md) | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/docs/models/ZipArtifactFlavor.md b/docs/models/ZipArtifactFlavor.md new file mode 100644 index 00000000..f263d10f --- /dev/null +++ b/docs/models/ZipArtifactFlavor.md @@ -0,0 +1,23 @@ +# launch.api_client.model.zip_artifact_flavor.ZipArtifactFlavor + +This is the entity-layer class for the Model Bundle flavor of a zip artifact. + +## Model Type Info +Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- +dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | This is the entity-layer class for the Model Bundle flavor of a zip artifact. 
| + +### Dictionary Keys +Key | Input Type | Accessed Type | Description | Notes +------------ | ------------- | ------------- | ------------- | ------------- +**flavor** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | must be one of ["zip_artifact", ] +**requirements** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**framework** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**load_model_fn_module_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**location** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**load_predict_fn_module_path** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | +**app_config** | dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, | frozendict.frozendict, 
str, decimal.Decimal, BoolClass, NoneClass, tuple, bytes, FileIO | | [optional] +**any_string_name** | dict, frozendict.frozendict, str, date, datetime, int, float, bool, decimal.Decimal, None, list, tuple, bytes, io.FileIO, io.BufferedReader | frozendict.frozendict, str, BoolClass, decimal.Decimal, NoneClass, tuple, bytes, FileIO | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../../README.md#documentation-for-models) [[Back to API list]](../../README.md#documentation-for-api-endpoints) [[Back to README]](../../README.md) + diff --git a/justfile b/justfile new file mode 100644 index 00000000..080e225b --- /dev/null +++ b/justfile @@ -0,0 +1,41 @@ +# Client regeneration from llm-engine OpenAPI schema + +# Configuration +llm_engine_repo := "scaleapi/llm-engine" +default_branch := "main" +schema_path := "model-engine/specs/openapi-3.0.json" +generator_version := "6.4.0" + +# Fetch the OpenAPI schema from llm-engine repo +fetch-schema branch=default_branch: + @echo "Fetching OpenAPI 3.0 schema from {{llm_engine_repo}} (branch: {{branch}})..." + curl -sSL "https://raw.githubusercontent.com/{{llm_engine_repo}}/{{branch}}/{{schema_path}}" -o openapi.json + @echo "Schema saved to openapi.json" + +# Generate client code from openapi.json +generate: + #!/usr/bin/env bash + set -euo pipefail + if [ ! -f openapi.json ]; then + echo "Error: openapi.json not found. Run 'just fetch-schema' first." + exit 1 + fi + echo "Generating client with OpenAPI Generator {{generator_version}}..." + docker run --rm \ + -v "$(pwd):/local" \ + openapitools/openapi-generator-cli:v{{generator_version}} generate \ + -i /local/openapi.json \ + -g python \ + -o /local \ + --package-name launch.api_client \ + --additional-properties=generateSourceCodeOnly=true + echo "Client generated. 
Review changes with 'git diff'" + +# Fetch schema and regenerate client +regenerate branch=default_branch: (fetch-schema branch) generate + +# Show current schema source info +info: + @echo "Schema source: https://github.com/{{llm_engine_repo}}/blob/{{default_branch}}/{{schema_path}}" + @echo "Generator version: {{generator_version}}" + @test -f openapi.json && echo "Local schema: openapi.json (exists)" || echo "Local schema: openapi.json (not found)" diff --git a/launch/api_client/__init__.py b/launch/api_client/__init__.py index 9661aac8..7c58d56a 100644 --- a/launch/api_client/__init__.py +++ b/launch/api_client/__init__.py @@ -11,7 +11,7 @@ Generated by: https://openapi-generator.tech """ -__version__ = "1.1.2" +__version__ = "1.0.0" # import ApiClient from launch.api_client.api_client import ApiClient diff --git a/launch/api_client/api_client.py b/launch/api_client/api_client.py index 9cdf8df8..a417ae3f 100644 --- a/launch/api_client/api_client.py +++ b/launch/api_client/api_client.py @@ -53,7 +53,7 @@ def __eq__(self, other): class JSONEncoder(json.JSONEncoder): - compact_separators = (",", ":") + compact_separators = (',', ':') def default(self, obj): if isinstance(obj, str): @@ -74,24 +74,24 @@ def default(self, obj): return {key: self.default(val) for key, val in obj.items()} elif isinstance(obj, (list, tuple)): return [self.default(item) for item in obj] - raise ApiValueError("Unable to prepare type {} for serialization".format(obj.__class__.__name__)) + raise ApiValueError('Unable to prepare type {} for serialization'.format(obj.__class__.__name__)) class ParameterInType(enum.Enum): - QUERY = "query" - HEADER = "header" - PATH = "path" - COOKIE = "cookie" + QUERY = 'query' + HEADER = 'header' + PATH = 'path' + COOKIE = 'cookie' class ParameterStyle(enum.Enum): - MATRIX = "matrix" - LABEL = "label" - FORM = "form" - SIMPLE = "simple" - SPACE_DELIMITED = "spaceDelimited" - PIPE_DELIMITED = "pipeDelimited" - DEEP_OBJECT = "deepObject" + MATRIX = 'matrix' + 
LABEL = 'label' + FORM = 'form' + SIMPLE = 'simple' + SPACE_DELIMITED = 'spaceDelimited' + PIPE_DELIMITED = 'pipeDelimited' + DEEP_OBJECT = 'deepObject' class PrefixSeparatorIterator: @@ -101,10 +101,10 @@ def __init__(self, prefix: str, separator: str): self.prefix = prefix self.separator = separator self.first = True - if separator in {".", "|", "%20"}: + if separator in {'.', '|', '%20'}: item_separator = separator else: - item_separator = "," + item_separator = ',' self.item_separator = item_separator def __iter__(self): @@ -146,7 +146,7 @@ def __ref6570_item_value(in_data: typing.Any, percent_encode: bool): elif isinstance(in_data, dict) and not in_data: # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 return None - raise ApiValueError("Unable to generate a ref6570 item representation of {}".format(in_data)) + raise ApiValueError('Unable to generate a ref6570 item representation of {}'.format(in_data)) @staticmethod def _to_dict(name: str, value: str): @@ -161,12 +161,12 @@ def __ref6570_str_float_int_expansion( percent_encode: bool, prefix_separator_iterator: PrefixSeparatorIterator, var_name_piece: str, - named_parameter_expansion: bool, + named_parameter_expansion: bool ) -> str: item_value = cls.__ref6570_item_value(in_data, percent_encode) - if item_value is None or (item_value == "" and prefix_separator_iterator.separator == ";"): + if item_value is None or (item_value == '' and prefix_separator_iterator.separator == ';'): return next(prefix_separator_iterator) + var_name_piece - value_pair_equals = "=" if named_parameter_expansion else "" + value_pair_equals = '=' if named_parameter_expansion else '' return next(prefix_separator_iterator) + var_name_piece + value_pair_equals + item_value @classmethod @@ -178,20 +178,20 @@ def __ref6570_list_expansion( percent_encode: bool, prefix_separator_iterator: PrefixSeparatorIterator, var_name_piece: str, - named_parameter_expansion: bool, + named_parameter_expansion: 
bool ) -> str: item_values = [cls.__ref6570_item_value(v, percent_encode) for v in in_data] item_values = [v for v in item_values if v is not None] if not item_values: # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 return "" - value_pair_equals = "=" if named_parameter_expansion else "" + value_pair_equals = '=' if named_parameter_expansion else '' if not explode: return ( - next(prefix_separator_iterator) - + var_name_piece - + value_pair_equals - + prefix_separator_iterator.item_separator.join(item_values) + next(prefix_separator_iterator) + + var_name_piece + + value_pair_equals + + prefix_separator_iterator.item_separator.join(item_values) ) # exploded return next(prefix_separator_iterator) + next(prefix_separator_iterator).join( @@ -207,27 +207,27 @@ def __ref6570_dict_expansion( percent_encode: bool, prefix_separator_iterator: PrefixSeparatorIterator, var_name_piece: str, - named_parameter_expansion: bool, + named_parameter_expansion: bool ) -> str: in_data_transformed = {key: cls.__ref6570_item_value(val, percent_encode) for key, val in in_data.items()} in_data_transformed = {key: val for key, val in in_data_transformed.items() if val is not None} if not in_data_transformed: # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 return "" - value_pair_equals = "=" if named_parameter_expansion else "" + value_pair_equals = '=' if named_parameter_expansion else '' if not explode: return ( - next(prefix_separator_iterator) - + var_name_piece - + value_pair_equals - + prefix_separator_iterator.item_separator.join( - prefix_separator_iterator.item_separator.join(item_pair) - for item_pair in in_data_transformed.items() + next(prefix_separator_iterator) + + var_name_piece + value_pair_equals + + prefix_separator_iterator.item_separator.join( + prefix_separator_iterator.item_separator.join( + item_pair + ) for item_pair in in_data_transformed.items() ) ) # exploded return 
next(prefix_separator_iterator) + next(prefix_separator_iterator).join( - [key + "=" + val for key, val in in_data_transformed.items()] + [key + '=' + val for key, val in in_data_transformed.items()] ) @classmethod @@ -237,13 +237,13 @@ def _ref6570_expansion( in_data: typing.Any, explode: bool, percent_encode: bool, - prefix_separator_iterator: PrefixSeparatorIterator, + prefix_separator_iterator: PrefixSeparatorIterator ) -> str: """ Separator is for separate variables like dict with explode true, not for array item separation """ - named_parameter_expansion = prefix_separator_iterator.separator in {"&", ";"} - var_name_piece = variable_name if named_parameter_expansion else "" + named_parameter_expansion = prefix_separator_iterator.separator in {'&', ';'} + var_name_piece = variable_name if named_parameter_expansion else '' if type(in_data) in {str, float, int}: return cls.__ref6570_str_float_int_expansion( variable_name, @@ -252,7 +252,7 @@ def _ref6570_expansion( percent_encode, prefix_separator_iterator, var_name_piece, - named_parameter_expansion, + named_parameter_expansion ) elif isinstance(in_data, none_type): # ignored by the expansion process https://datatracker.ietf.org/doc/html/rfc6570#section-3.2.1 @@ -265,7 +265,7 @@ def _ref6570_expansion( percent_encode, prefix_separator_iterator, var_name_piece, - named_parameter_expansion, + named_parameter_expansion ) elif isinstance(in_data, dict): return cls.__ref6570_dict_expansion( @@ -275,10 +275,10 @@ def _ref6570_expansion( percent_encode, prefix_separator_iterator, var_name_piece, - named_parameter_expansion, + named_parameter_expansion ) # bool, bytes, etc - raise ApiValueError("Unable to generate a ref6570 representation of {}".format(in_data)) + raise ApiValueError('Unable to generate a ref6570 representation of {}'.format(in_data)) class StyleFormSerializer(ParameterSerializerBase): @@ -294,34 +294,35 @@ def _serialize_form( name: str, explode: bool, percent_encode: bool, - 
prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None, + prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None ) -> str: if prefix_separator_iterator is None: - prefix_separator_iterator = PrefixSeparatorIterator("", "&") + prefix_separator_iterator = PrefixSeparatorIterator('', '&') return self._ref6570_expansion( variable_name=name, in_data=in_data, explode=explode, percent_encode=percent_encode, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) class StyleSimpleSerializer(ParameterSerializerBase): + def _serialize_simple( self, in_data: typing.Union[None, int, float, str, bool, dict, list], name: str, explode: bool, - percent_encode: bool, + percent_encode: bool ) -> str: - prefix_separator_iterator = PrefixSeparatorIterator("", ",") + prefix_separator_iterator = PrefixSeparatorIterator('', ',') return self._ref6570_expansion( variable_name=name, in_data=in_data, explode=explode, percent_encode=percent_encode, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) @@ -333,7 +334,6 @@ class JSONDetector: application/json-patch+json application/geo+json """ - __json_content_type_pattern = re.compile("application/[^+]*[+]?(json);?.*") @classmethod @@ -369,7 +369,7 @@ class ParameterBase(JSONDetector): ParameterInType.HEADER: ParameterStyle.SIMPLE, ParameterInType.COOKIE: ParameterStyle.FORM, } - __disallowed_header_names = {"Accept", "Content-Type", "Authorization"} + __disallowed_header_names = {'Accept', 'Content-Type', 'Authorization'} _json_encoder = JSONEncoder() @classmethod @@ -379,7 +379,7 @@ def __verify_style_to_in_type(cls, style: typing.Optional[ParameterStyle], in_ty in_type_set = cls.__style_to_in_type[style] if in_type not in in_type_set: raise ValueError( - "Invalid style and in_type combination. For style={} only in_type={} are allowed".format( + 'Invalid style and in_type combination. 
For style={} only in_type={} are allowed'.format( style, in_type_set ) ) @@ -393,19 +393,19 @@ def __init__( explode: bool = False, allow_reserved: typing.Optional[bool] = None, schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None, + content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None ): if schema is None and content is None: - raise ValueError("Value missing; Pass in either schema or content") + raise ValueError('Value missing; Pass in either schema or content') if schema and content: - raise ValueError("Too many values provided. Both schema and content were provided. Only one may be input") + raise ValueError('Too many values provided. Both schema and content were provided. Only one may be input') if name in self.__disallowed_header_names and in_type is ParameterInType.HEADER: - raise ValueError("Invalid name, name may not be one of {}".format(self.__disallowed_header_names)) + raise ValueError('Invalid name, name may not be one of {}'.format(self.__disallowed_header_names)) self.__verify_style_to_in_type(style, in_type) if content is None and style is None: style = self.__in_type_to_default_style[in_type] if content is not None and in_type in self.__in_type_to_default_style and len(content) != 1: - raise ValueError("Invalid content length, content length must equal 1") + raise ValueError('Invalid content length, content length must equal 1') self.in_type = in_type self.name = name self.required = required @@ -416,7 +416,9 @@ def __init__( self.content = content def _serialize_json( - self, in_data: typing.Union[None, int, float, str, bool, dict, list], eliminate_whitespace: bool = False + self, + in_data: typing.Union[None, int, float, str, bool, dict, list], + eliminate_whitespace: bool = False ) -> str: if eliminate_whitespace: return json.dumps(in_data, separators=self._json_encoder.compact_separators) @@ -424,6 +426,7 @@ def _serialize_json( class 
PathParameter(ParameterBase, StyleSimpleSerializer): + def __init__( self, name: str, @@ -432,7 +435,7 @@ def __init__( explode: bool = False, allow_reserved: typing.Optional[bool] = None, schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None, + content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None ): super().__init__( name, @@ -442,32 +445,34 @@ def __init__( explode=explode, allow_reserved=allow_reserved, schema=schema, - content=content, + content=content ) def __serialize_label( - self, in_data: typing.Union[None, int, float, str, bool, dict, list] + self, + in_data: typing.Union[None, int, float, str, bool, dict, list] ) -> typing.Dict[str, str]: - prefix_separator_iterator = PrefixSeparatorIterator(".", ".") + prefix_separator_iterator = PrefixSeparatorIterator('.', '.') value = self._ref6570_expansion( variable_name=self.name, in_data=in_data, explode=self.explode, percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) return self._to_dict(self.name, value) def __serialize_matrix( - self, in_data: typing.Union[None, int, float, str, bool, dict, list] + self, + in_data: typing.Union[None, int, float, str, bool, dict, list] ) -> typing.Dict[str, str]: - prefix_separator_iterator = PrefixSeparatorIterator(";", ";") + prefix_separator_iterator = PrefixSeparatorIterator(';', ';') value = self._ref6570_expansion( variable_name=self.name, in_data=in_data, explode=self.explode, percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) return self._to_dict(self.name, value) @@ -475,14 +480,18 @@ def __serialize_simple( self, in_data: typing.Union[None, int, float, str, bool, dict, list], ) -> typing.Dict[str, str]: - value = self._serialize_simple(in_data=in_data, name=self.name, explode=self.explode, percent_encode=True) + value = 
self._serialize_simple( + in_data=in_data, + name=self.name, + explode=self.explode, + percent_encode=True + ) return self._to_dict(self.name, value) def serialize( self, in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict - ], + Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict] ) -> typing.Dict[str, str]: if self.schema: cast_in_data = self.schema(in_data) @@ -510,10 +519,11 @@ def serialize( if self._content_type_is_json(content_type): value = self._serialize_json(cast_in_data) return self._to_dict(self.name, value) - raise NotImplementedError("Serialization of {} has not yet been implemented".format(content_type)) + raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) class QueryParameter(ParameterBase, StyleFormSerializer): + def __init__( self, name: str, @@ -522,7 +532,7 @@ def __init__( explode: typing.Optional[bool] = None, allow_reserved: typing.Optional[bool] = None, schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None, + content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None ): used_style = ParameterStyle.FORM if style is None else style used_explode = self._get_default_explode(used_style) if explode is None else explode @@ -535,13 +545,13 @@ def __init__( explode=used_explode, allow_reserved=allow_reserved, schema=schema, - content=content, + content=content ) def __serialize_space_delimited( self, in_data: typing.Union[None, int, float, str, bool, dict, list], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator], + prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] ) -> typing.Dict[str, str]: if prefix_separator_iterator is None: prefix_separator_iterator = self.get_prefix_separator_iterator() @@ -550,14 +560,14 @@ def __serialize_space_delimited( 
in_data=in_data, explode=self.explode, percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) return self._to_dict(self.name, value) def __serialize_pipe_delimited( self, in_data: typing.Union[None, int, float, str, bool, dict, list], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator], + prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] ) -> typing.Dict[str, str]: if prefix_separator_iterator is None: prefix_separator_iterator = self.get_prefix_separator_iterator() @@ -566,14 +576,14 @@ def __serialize_pipe_delimited( in_data=in_data, explode=self.explode, percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) return self._to_dict(self.name, value) def __serialize_form( self, in_data: typing.Union[None, int, float, str, bool, dict, list], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator], + prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] ) -> typing.Dict[str, str]: if prefix_separator_iterator is None: prefix_separator_iterator = self.get_prefix_separator_iterator() @@ -582,24 +592,23 @@ def __serialize_form( name=self.name, explode=self.explode, percent_encode=True, - prefix_separator_iterator=prefix_separator_iterator, + prefix_separator_iterator=prefix_separator_iterator ) return self._to_dict(self.name, value) def get_prefix_separator_iterator(self) -> typing.Optional[PrefixSeparatorIterator]: if self.style is ParameterStyle.FORM: - return PrefixSeparatorIterator("?", "&") + return PrefixSeparatorIterator('?', '&') elif self.style is ParameterStyle.SPACE_DELIMITED: - return PrefixSeparatorIterator("", "%20") + return PrefixSeparatorIterator('', '%20') elif self.style is ParameterStyle.PIPE_DELIMITED: - return PrefixSeparatorIterator("", "|") + return PrefixSeparatorIterator('', '|') def serialize( self, in_data: typing.Union[ - 
Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict - ], - prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None, + Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict], + prefix_separator_iterator: typing.Optional[PrefixSeparatorIterator] = None ) -> typing.Dict[str, str]: if self.schema: cast_in_data = self.schema(in_data) @@ -633,11 +642,15 @@ def serialize( cast_in_data = self._json_encoder.default(cast_in_data) if self._content_type_is_json(content_type): value = self._serialize_json(cast_in_data, eliminate_whitespace=True) - return self._to_dict(self.name, next(prefix_separator_iterator) + self.name + "=" + quote(value)) - raise NotImplementedError("Serialization of {} has not yet been implemented".format(content_type)) + return self._to_dict( + self.name, + next(prefix_separator_iterator) + self.name + '=' + quote(value) + ) + raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) class CookieParameter(ParameterBase, StyleFormSerializer): + def __init__( self, name: str, @@ -646,7 +659,7 @@ def __init__( explode: typing.Optional[bool] = None, allow_reserved: typing.Optional[bool] = None, schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None, + content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None ): used_style = ParameterStyle.FORM if style is None and content is None and schema else style used_explode = self._get_default_explode(used_style) if explode is None else explode @@ -659,14 +672,13 @@ def __init__( explode=used_explode, allow_reserved=allow_reserved, schema=schema, - content=content, + content=content ) def serialize( self, in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict - ], + Schema, Decimal, int, float, str, date, 
datetime, None, bool, list, tuple, dict, frozendict.frozendict] ) -> typing.Dict[str, str]: if self.schema: cast_in_data = self.schema(in_data) @@ -685,7 +697,7 @@ def serialize( explode=self.explode, name=self.name, percent_encode=False, - prefix_separator_iterator=PrefixSeparatorIterator("", "&"), + prefix_separator_iterator=PrefixSeparatorIterator('', '&') ) return self._to_dict(self.name, value) # self.content will be length one @@ -695,7 +707,7 @@ def serialize( if self._content_type_is_json(content_type): value = self._serialize_json(cast_in_data) return self._to_dict(self.name, value) - raise NotImplementedError("Serialization of {} has not yet been implemented".format(content_type)) + raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) class HeaderParameter(ParameterBase, StyleSimpleSerializer): @@ -707,7 +719,7 @@ def __init__( explode: bool = False, allow_reserved: typing.Optional[bool] = None, schema: typing.Optional[typing.Type[Schema]] = None, - content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None, + content: typing.Optional[typing.Dict[str, typing.Type[Schema]]] = None ): super().__init__( name, @@ -717,7 +729,7 @@ def __init__( explode=explode, allow_reserved=allow_reserved, schema=schema, - content=content, + content=content ) @staticmethod @@ -732,8 +744,7 @@ def __to_headers(in_data: typing.Tuple[typing.Tuple[str, str], ...]) -> HTTPHead def serialize( self, in_data: typing.Union[ - Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict - ], + Schema, Decimal, int, float, str, date, datetime, None, bool, list, tuple, dict, frozendict.frozendict] ) -> HTTPHeaderDict: if self.schema: cast_in_data = self.schema(in_data) @@ -753,7 +764,7 @@ def serialize( if self._content_type_is_json(content_type): value = self._serialize_json(cast_in_data) return self.__to_headers(((self.name, value),)) - raise NotImplementedError("Serialization of {} has 
not yet been implemented".format(content_type)) + raise NotImplementedError('Serialization of {} has not yet been implemented'.format(content_type)) class Encoding: @@ -782,7 +793,6 @@ class MediaType: The encoding object SHALL only apply to requestBody objects when the media type is multipart or application/x-www-form-urlencoded. """ - schema: typing.Optional[typing.Type[Schema]] = None encoding: typing.Optional[typing.Dict[str, Encoding]] = None @@ -797,7 +807,7 @@ def __init__( self, response: urllib3.HTTPResponse, body: typing.Union[Unset, Schema] = unset, - headers: typing.Union[Unset, typing.Dict[str, Schema]] = unset, + headers: typing.Union[Unset, typing.Dict[str, Schema]] = unset ): """ pycharm needs this to prevent 'Unexpected argument' warnings @@ -825,7 +835,7 @@ def __init__( ): self.headers = headers if content is not None and len(content) == 0: - raise ValueError("Invalid value for content, the content dict must have >= 1 entry") + raise ValueError('Invalid value for content, the content dict must have >= 1 entry') self.content = content self.response_cls = response_cls @@ -866,16 +876,17 @@ def __deserialize_application_octet_stream( a file will be written and returned """ if response.supports_chunked_reads(): - file_name = self.__file_name_from_content_disposition( - response.headers.get("content-disposition") - ) or self.__file_name_from_response_url(response.geturl()) + file_name = ( + self.__file_name_from_content_disposition(response.headers.get('content-disposition')) + or self.__file_name_from_response_url(response.geturl()) + ) if file_name is None: _fd, path = tempfile.mkstemp() else: path = os.path.join(tempfile.gettempdir(), file_name) - with open(path, "wb") as new_file: + with open(path, 'wb') as new_file: chunk_size = 1024 while True: data = response.read(chunk_size) @@ -884,25 +895,27 @@ def __deserialize_application_octet_stream( new_file.write(data) # release_conn is needed for streaming connections only response.release_conn() - 
new_file = open(path, "rb") + new_file = open(path, 'rb') return new_file else: return response.data @staticmethod - def __deserialize_multipart_form_data(response: urllib3.HTTPResponse) -> typing.Dict[str, typing.Any]: + def __deserialize_multipart_form_data( + response: urllib3.HTTPResponse + ) -> typing.Dict[str, typing.Any]: msg = email.message_from_bytes(response.data) return { - part.get_param("name", header="Content-Disposition"): part.get_payload(decode=True).decode( - part.get_content_charset() - ) + part.get_param("name", header="Content-Disposition"): part.get_payload( + decode=True + ).decode(part.get_content_charset()) if part.get_content_charset() else part.get_payload() for part in msg.get_payload() } def deserialize(self, response: urllib3.HTTPResponse, configuration: Configuration) -> ApiResponse: - content_type = response.getheader("content-type") + content_type = response.getheader('content-type') deserialized_body = unset streamed = response.supports_chunked_reads() @@ -920,22 +933,31 @@ def deserialize(self, response: urllib3.HTTPResponse, configuration: Configurati body_schema = self.content[content_type].schema if body_schema is None: # some specs do not define response content media type schemas - return self.response_cls(response=response, headers=deserialized_headers, body=unset) + return self.response_cls( + response=response, + headers=deserialized_headers, + body=unset + ) if self._content_type_is_json(content_type): body_data = self.__deserialize_json(response) - elif content_type == "application/octet-stream": + elif content_type == 'application/octet-stream': body_data = self.__deserialize_application_octet_stream(response) - elif content_type.startswith("multipart/form-data"): + elif content_type.startswith('multipart/form-data'): body_data = self.__deserialize_multipart_form_data(response) - content_type = "multipart/form-data" + content_type = 'multipart/form-data' else: - raise NotImplementedError("Deserialization of {} has not 
yet been implemented".format(content_type)) - deserialized_body = body_schema.from_openapi_data_oapg(body_data, _configuration=configuration) + raise NotImplementedError('Deserialization of {} has not yet been implemented'.format(content_type)) + deserialized_body = body_schema.from_openapi_data_oapg( + body_data, _configuration=configuration) elif streamed: response.release_conn() - return self.response_cls(response=response, headers=deserialized_headers, body=deserialized_body) + return self.response_cls( + response=response, + headers=deserialized_headers, + body=deserialized_body + ) class ApiClient: @@ -968,7 +990,7 @@ def __init__( header_name: typing.Optional[str] = None, header_value: typing.Optional[str] = None, cookie: typing.Optional[str] = None, - pool_threads: int = 1, + pool_threads: int = 1 ): if configuration is None: configuration = Configuration() @@ -981,7 +1003,7 @@ def __init__( self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. - self.user_agent = "OpenAPI-Generator/1.1.2/python" + self.user_agent = 'OpenAPI-Generator/1.0.0/python' def __enter__(self): return self @@ -994,13 +1016,13 @@ def close(self): self._pool.close() self._pool.join() self._pool = None - if hasattr(atexit, "unregister"): + if hasattr(atexit, 'unregister'): atexit.unregister(self.close) @property def pool(self): """Create thread pool on first request - avoids instantiating unused threadpool for blocking clients. + avoids instantiating unused threadpool for blocking clients. 
""" if self._pool is None: atexit.register(self.close) @@ -1010,11 +1032,11 @@ def pool(self): @property def user_agent(self): """User agent for this API client""" - return self.default_headers["User-Agent"] + return self.default_headers['User-Agent'] @user_agent.setter def user_agent(self, value): - self.default_headers["User-Agent"] = value + self.default_headers['User-Agent'] = value def set_default_header(self, header_name, header_value): self.default_headers[header_name] = header_value @@ -1031,13 +1053,15 @@ def __call_api( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, host: typing.Optional[str] = None, ) -> urllib3.HTTPResponse: + # header parameters used_headers = HTTPHeaderDict(self.default_headers) if self.cookie: - headers["Cookie"] = self.cookie + headers['Cookie'] = self.cookie # auth setting - self.update_params_for_auth(used_headers, auth_settings, resource_path, method, body) + self.update_params_for_auth(used_headers, + auth_settings, resource_path, method, body) # must happen after cookie setting and auth setting in case user is overriding those if headers: @@ -1135,7 +1159,7 @@ def call_api( stream, timeout, host, - ), + ) ) def request( @@ -1150,27 +1174,57 @@ def request( ) -> urllib3.HTTPResponse: """Makes the HTTP request using RESTClient.""" if method == "GET": - return self.rest_client.GET(url, stream=stream, timeout=timeout, headers=headers) + return self.rest_client.GET(url, + stream=stream, + timeout=timeout, + headers=headers) elif method == "HEAD": - return self.rest_client.HEAD(url, stream=stream, timeout=timeout, headers=headers) + return self.rest_client.HEAD(url, + stream=stream, + timeout=timeout, + headers=headers) elif method == "OPTIONS": - return self.rest_client.OPTIONS( - url, headers=headers, fields=fields, stream=stream, timeout=timeout, body=body - ) + return self.rest_client.OPTIONS(url, + headers=headers, + fields=fields, + stream=stream, + timeout=timeout, + body=body) elif method == "POST": - 
return self.rest_client.POST(url, headers=headers, fields=fields, stream=stream, timeout=timeout, body=body) + return self.rest_client.POST(url, + headers=headers, + fields=fields, + stream=stream, + timeout=timeout, + body=body) elif method == "PUT": - return self.rest_client.PUT(url, headers=headers, fields=fields, stream=stream, timeout=timeout, body=body) + return self.rest_client.PUT(url, + headers=headers, + fields=fields, + stream=stream, + timeout=timeout, + body=body) elif method == "PATCH": - return self.rest_client.PATCH( - url, headers=headers, fields=fields, stream=stream, timeout=timeout, body=body - ) + return self.rest_client.PATCH(url, + headers=headers, + fields=fields, + stream=stream, + timeout=timeout, + body=body) elif method == "DELETE": - return self.rest_client.DELETE(url, headers=headers, stream=stream, timeout=timeout, body=body) + return self.rest_client.DELETE(url, + headers=headers, + stream=stream, + timeout=timeout, + body=body) else: - raise ApiValueError("http method must be `GET`, `HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT` or `DELETE`.") + raise ApiValueError( + "http method must be `GET`, `HEAD`, `OPTIONS`," + " `POST`, `PATCH`, `PUT` or `DELETE`." + ) - def update_params_for_auth(self, headers, auth_settings, resource_path, method, body): + def update_params_for_auth(self, headers, auth_settings, + resource_path, method, body): """Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. 
@@ -1187,19 +1241,21 @@ def update_params_for_auth(self, headers, auth_settings, resource_path, method, auth_setting = self.configuration.auth_settings().get(auth) if not auth_setting: continue - if auth_setting["in"] == "cookie": - headers.add("Cookie", auth_setting["value"]) - elif auth_setting["in"] == "header": - if auth_setting["type"] != "http-signature": - headers.add(auth_setting["key"], auth_setting["value"]) - elif auth_setting["in"] == "query": - """TODO implement auth in query + if auth_setting['in'] == 'cookie': + headers.add('Cookie', auth_setting['value']) + elif auth_setting['in'] == 'header': + if auth_setting['type'] != 'http-signature': + headers.add(auth_setting['key'], auth_setting['value']) + elif auth_setting['in'] == 'query': + """ TODO implement auth in query need to pass in prefix_separator_iterator and need to output resource_path with query params added """ raise ApiValueError("Auth in query not yet implemented") else: - raise ApiValueError("Authentication token must be in `query` or `header`") + raise ApiValueError( + 'Authentication token must be in `query` or `header`' + ) class Api: @@ -1215,9 +1271,7 @@ def __init__(self, api_client: typing.Optional[ApiClient] = None): self.api_client = api_client @staticmethod - def _verify_typed_dict_inputs_oapg( - cls: typing.Type[typing_extensions.TypedDict], data: typing.Dict[str, typing.Any] - ): + def _verify_typed_dict_inputs_oapg(cls: typing.Type[typing_extensions.TypedDict], data: typing.Dict[str, typing.Any]): """ Ensures that: - required keys are present @@ -1236,13 +1290,13 @@ def _verify_typed_dict_inputs_oapg( required_keys_with_unset_values.append(required_key) if missing_required_keys: raise ApiTypeError( - "{} missing {} required arguments: {}".format( + '{} missing {} required arguments: {}'.format( cls.__name__, len(missing_required_keys), missing_required_keys - ) - ) + ) + ) if required_keys_with_unset_values: raise ApiValueError( - "{} contains invalid unset values for {} 
required keys: {}".format( + '{} contains invalid unset values for {} required keys: {}'.format( cls.__name__, len(required_keys_with_unset_values), required_keys_with_unset_values ) ) @@ -1254,7 +1308,7 @@ def _verify_typed_dict_inputs_oapg( disallowed_additional_keys.append(key) if disallowed_additional_keys: raise ApiTypeError( - "{} got {} unexpected keyword arguments: {}".format( + '{} got {} unexpected keyword arguments: {}'.format( cls.__name__, len(disallowed_additional_keys), disallowed_additional_keys ) ) @@ -1263,21 +1317,28 @@ def _get_host_oapg( self, operation_id: str, servers: typing.Tuple[typing.Dict[str, str], ...] = tuple(), - host_index: typing.Optional[int] = None, + host_index: typing.Optional[int] = None ) -> typing.Optional[str]: configuration = self.api_client.configuration try: if host_index is None: - index = configuration.server_operation_index.get(operation_id, configuration.server_index) + index = configuration.server_operation_index.get( + operation_id, configuration.server_index + ) else: index = host_index server_variables = configuration.server_operation_variables.get( operation_id, configuration.server_variables ) - host = configuration.get_host_from_settings(index, variables=server_variables, servers=servers) + host = configuration.get_host_from_settings( + index, variables=server_variables, servers=servers + ) except IndexError: if servers: - raise ApiValueError("Invalid host index. Must be 0 <= index < %s" % len(servers)) + raise ApiValueError( + "Invalid host index. 
Must be 0 <= index < %s" % + len(servers) + ) host = None return host @@ -1292,7 +1353,6 @@ class RequestBody(StyleFormSerializer, JSONDetector): A request body parameter content: content_type to MediaType Schema info """ - __json_encoder = JSONEncoder() def __init__( @@ -1302,39 +1362,44 @@ def __init__( ): self.required = required if len(content) == 0: - raise ValueError("Invalid value for content, the content dict must have >= 1 entry") + raise ValueError('Invalid value for content, the content dict must have >= 1 entry') self.content = content - def __serialize_json(self, in_data: typing.Any) -> typing.Dict[str, bytes]: + def __serialize_json( + self, + in_data: typing.Any + ) -> typing.Dict[str, bytes]: in_data = self.__json_encoder.default(in_data) - json_str = json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode("utf-8") + json_str = json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode( + "utf-8" + ) return dict(body=json_str) @staticmethod def __serialize_text_plain(in_data: typing.Any) -> typing.Dict[str, str]: if isinstance(in_data, frozendict.frozendict): - raise ValueError("Unable to serialize type frozendict.frozendict to text/plain") + raise ValueError('Unable to serialize type frozendict.frozendict to text/plain') elif isinstance(in_data, tuple): - raise ValueError("Unable to serialize type tuple to text/plain") + raise ValueError('Unable to serialize type tuple to text/plain') elif isinstance(in_data, NoneClass): - raise ValueError("Unable to serialize type NoneClass to text/plain") + raise ValueError('Unable to serialize type NoneClass to text/plain') elif isinstance(in_data, BoolClass): - raise ValueError("Unable to serialize type BoolClass to text/plain") + raise ValueError('Unable to serialize type BoolClass to text/plain') return dict(body=str(in_data)) def __multipart_json_item(self, key: str, value: Schema) -> RequestField: json_value = self.__json_encoder.default(value) request_field = RequestField(name=key, 
data=json.dumps(json_value)) - request_field.make_multipart(content_type="application/json") + request_field.make_multipart(content_type='application/json') return request_field def __multipart_form_item(self, key: str, value: Schema) -> RequestField: if isinstance(value, str): request_field = RequestField(name=key, data=str(value)) - request_field.make_multipart(content_type="text/plain") + request_field.make_multipart(content_type='text/plain') elif isinstance(value, bytes): request_field = RequestField(name=key, data=value) - request_field.make_multipart(content_type="application/octet-stream") + request_field.make_multipart(content_type='application/octet-stream') elif isinstance(value, FileIO): # TODO use content.encoding to limit allowed content types if they are present request_field = RequestField.from_tuples(key, (os.path.basename(value.name), value.read())) @@ -1343,9 +1408,11 @@ def __multipart_form_item(self, key: str, value: Schema) -> RequestField: request_field = self.__multipart_json_item(key=key, value=value) return request_field - def __serialize_multipart_form_data(self, in_data: Schema) -> typing.Dict[str, typing.Tuple[RequestField, ...]]: + def __serialize_multipart_form_data( + self, in_data: Schema + ) -> typing.Dict[str, typing.Tuple[RequestField, ...]]: if not isinstance(in_data, frozendict.frozendict): - raise ValueError(f"Unable to serialize {in_data} to multipart/form-data because it is not a dict of data") + raise ValueError(f'Unable to serialize {in_data} to multipart/form-data because it is not a dict of data') """ In a multipart/form-data request body, each schema property, or each element of a schema array property, takes a section in the payload with an internal header as defined by RFC7578. The serialization strategy @@ -1385,19 +1452,22 @@ def __serialize_application_octet_stream(self, in_data: BinarySchema) -> typing. 
in_data.close() return result - def __serialize_application_x_www_form_data(self, in_data: typing.Any) -> SerializedRequestBody: + def __serialize_application_x_www_form_data( + self, in_data: typing.Any + ) -> SerializedRequestBody: """ POST submission of form data in body """ if not isinstance(in_data, frozendict.frozendict): raise ValueError( - f"Unable to serialize {in_data} to application/x-www-form-urlencoded because it is not a dict of data" - ) + f'Unable to serialize {in_data} to application/x-www-form-urlencoded because it is not a dict of data') cast_in_data = self.__json_encoder.default(in_data) - value = self._serialize_form(cast_in_data, name="", explode=True, percent_encode=True) + value = self._serialize_form(cast_in_data, name='', explode=True, percent_encode=True) return dict(body=value) - def serialize(self, in_data: typing.Any, content_type: str) -> SerializedRequestBody: + def serialize( + self, in_data: typing.Any, content_type: str + ) -> SerializedRequestBody: """ If a str is returned then the result will be assigned to data when making the request If a tuple is returned then the result will be used as fields input in encode_multipart_formdata @@ -1418,12 +1488,12 @@ def serialize(self, in_data: typing.Any, content_type: str) -> SerializedRequest # and content_type is multipart or application/x-www-form-urlencoded if self._content_type_is_json(content_type): return self.__serialize_json(cast_in_data) - elif content_type == "text/plain": + elif content_type == 'text/plain': return self.__serialize_text_plain(cast_in_data) - elif content_type == "multipart/form-data": + elif content_type == 'multipart/form-data': return self.__serialize_multipart_form_data(cast_in_data) - elif content_type == "application/x-www-form-urlencoded": + elif content_type == 'application/x-www-form-urlencoded': return self.__serialize_application_x_www_form_data(cast_in_data) - elif content_type == "application/octet-stream": + elif content_type == 
'application/octet-stream': return self.__serialize_application_octet_stream(cast_in_data) - raise NotImplementedError("Serialization has not yet been implemented for {}".format(content_type)) + raise NotImplementedError('Serialization has not yet been implemented for {}'.format(content_type)) \ No newline at end of file diff --git a/launch/api_client/apis/__init__.py b/launch/api_client/apis/__init__.py index 5ca66b80..7840f772 100644 --- a/launch/api_client/apis/__init__.py +++ b/launch/api_client/apis/__init__.py @@ -1,3 +1,3 @@ # do not import all endpoints into this module because that uses a lot of memory and stack frames # if you need the ability to import all endpoints then import them from -# tags, paths, or path_to_api, or tag_to_api +# tags, paths, or path_to_api, or tag_to_api \ No newline at end of file diff --git a/launch/api_client/apis/path_to_api.py b/launch/api_client/apis/path_to_api.py index ebbfc332..5bc8d9ce 100644 --- a/launch/api_client/apis/path_to_api.py +++ b/launch/api_client/apis/path_to_api.py @@ -76,6 +76,9 @@ from launch.api_client.apis.paths.v1_model_endpoints_model_endpoint_id import ( V1ModelEndpointsModelEndpointId, ) +from launch.api_client.apis.paths.v1_model_endpoints_model_endpoint_id_restart import ( + V1ModelEndpointsModelEndpointIdRestart, +) from launch.api_client.apis.paths.v1_model_endpoints_schema_json import ( V1ModelEndpointsSchemaJson, ) @@ -85,6 +88,17 @@ from launch.api_client.apis.paths.v1_triggers_trigger_id import ( V1TriggersTriggerId, ) +from launch.api_client.apis.paths.v2_batch_completions import ( + V2BatchCompletions, +) +from launch.api_client.apis.paths.v2_batch_completions_batch_completion_id import ( + V2BatchCompletionsBatchCompletionId, +) +from launch.api_client.apis.paths.v2_batch_completions_batch_completion_id_actions_cancel import ( + V2BatchCompletionsBatchCompletionIdActionsCancel, +) +from launch.api_client.apis.paths.v2_chat_completions import V2ChatCompletions +from 
launch.api_client.apis.paths.v2_completions import V2Completions from launch.api_client.apis.paths.v2_model_bundles import V2ModelBundles from launch.api_client.apis.paths.v2_model_bundles_clone_with_changes import ( V2ModelBundlesCloneWithChanges, @@ -98,94 +112,106 @@ from launch.api_client.paths import PathValues PathToApi = typing_extensions.TypedDict( - "PathToApi", + 'PathToApi', { - PathValues.HEALTHCHECK: Healthcheck, - PathValues.HEALTHZ: Healthz, - PathValues.READYZ: Readyz, - PathValues.V1_ASYNCTASKS: V1AsyncTasks, - PathValues.V1_ASYNCTASKS_TASK_ID: V1AsyncTasksTaskId, PathValues.V1_BATCHJOBS: V1BatchJobs, PathValues.V1_BATCHJOBS_BATCH_JOB_ID: V1BatchJobsBatchJobId, + PathValues.V1_DOCKERIMAGEBATCHJOBS: V1DockerImageBatchJobs, + PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID: V1DockerImageBatchJobsBatchJobId, + PathValues.V1_ASYNCTASKS: V1AsyncTasks, + PathValues.V1_ASYNCTASKS_TASK_ID: V1AsyncTasksTaskId, + PathValues.V1_SYNCTASKS: V1SyncTasks, + PathValues.V1_STREAMINGTASKS: V1StreamingTasks, + PathValues.V1_MODELBUNDLES: V1ModelBundles, + PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES: V1ModelBundlesCloneWithChanges, + PathValues.V1_MODELBUNDLES_LATEST: V1ModelBundlesLatest, + PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID: V1ModelBundlesModelBundleId, + PathValues.V2_MODELBUNDLES: V2ModelBundles, + PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES: V2ModelBundlesCloneWithChanges, + PathValues.V2_MODELBUNDLES_LATEST: V2ModelBundlesLatest, + PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID: V2ModelBundlesModelBundleId, + PathValues.V1_MODELENDPOINTS: V1ModelEndpoints, + PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID: V1ModelEndpointsModelEndpointId, + PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART: V1ModelEndpointsModelEndpointIdRestart, + PathValues.V1_MODELENDPOINTSSCHEMA_JSON: V1ModelEndpointsSchemaJson, + PathValues.V1_MODELENDPOINTSAPI: V1ModelEndpointsApi, PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES: V1DockerImageBatchJobBundles, 
PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST: V1DockerImageBatchJobBundlesLatest, PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID: V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId, - PathValues.V1_DOCKERIMAGEBATCHJOBS: V1DockerImageBatchJobs, - PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID: V1DockerImageBatchJobsBatchJobId, - PathValues.V1_FILES: V1Files, - PathValues.V1_FILES_FILE_ID: V1FilesFileId, - PathValues.V1_FILES_FILE_ID_CONTENT: V1FilesFileIdContent, - PathValues.V1_LLM_BATCHCOMPLETIONS: V1LlmBatchCompletions, - PathValues.V1_LLM_COMPLETIONSSTREAM: V1LlmCompletionsStream, + PathValues.V1_LLM_MODELENDPOINTS: V1LlmModelEndpoints, + PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME: V1LlmModelEndpointsModelEndpointName, PathValues.V1_LLM_COMPLETIONSSYNC: V1LlmCompletionsSync, + PathValues.V1_LLM_COMPLETIONSSTREAM: V1LlmCompletionsStream, PathValues.V1_LLM_FINETUNES: V1LlmFineTunes, PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID: V1LlmFineTunesFineTuneId, PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL: V1LlmFineTunesFineTuneIdCancel, PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS: V1LlmFineTunesFineTuneIdEvents, - PathValues.V1_LLM_MODELENDPOINTS: V1LlmModelEndpoints, PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD: V1LlmModelEndpointsDownload, - PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME: V1LlmModelEndpointsModelEndpointName, - PathValues.V1_MODELBUNDLES: V1ModelBundles, - PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES: V1ModelBundlesCloneWithChanges, - PathValues.V1_MODELBUNDLES_LATEST: V1ModelBundlesLatest, - PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID: V1ModelBundlesModelBundleId, - PathValues.V1_MODELENDPOINTS: V1ModelEndpoints, - PathValues.V1_MODELENDPOINTSAPI: V1ModelEndpointsApi, - PathValues.V1_MODELENDPOINTSSCHEMA_JSON: V1ModelEndpointsSchemaJson, - PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID: V1ModelEndpointsModelEndpointId, - PathValues.V1_STREAMINGTASKS: V1StreamingTasks, - PathValues.V1_SYNCTASKS: 
V1SyncTasks, + PathValues.V1_LLM_BATCHCOMPLETIONS: V1LlmBatchCompletions, + PathValues.V1_FILES: V1Files, + PathValues.V1_FILES_FILE_ID: V1FilesFileId, + PathValues.V1_FILES_FILE_ID_CONTENT: V1FilesFileIdContent, PathValues.V1_TRIGGERS: V1Triggers, PathValues.V1_TRIGGERS_TRIGGER_ID: V1TriggersTriggerId, - PathValues.V2_MODELBUNDLES: V2ModelBundles, - PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES: V2ModelBundlesCloneWithChanges, - PathValues.V2_MODELBUNDLES_LATEST: V2ModelBundlesLatest, - PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID: V2ModelBundlesModelBundleId, - }, + PathValues.V2_BATCHCOMPLETIONS: V2BatchCompletions, + PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID: V2BatchCompletionsBatchCompletionId, + PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL: V2BatchCompletionsBatchCompletionIdActionsCancel, + PathValues.V2_CHAT_COMPLETIONS: V2ChatCompletions, + PathValues.V2_COMPLETIONS: V2Completions, + PathValues.HEALTHCHECK: Healthcheck, + PathValues.HEALTHZ: Healthz, + PathValues.READYZ: Readyz, + } ) path_to_api = PathToApi( { - PathValues.HEALTHCHECK: Healthcheck, - PathValues.HEALTHZ: Healthz, - PathValues.READYZ: Readyz, - PathValues.V1_ASYNCTASKS: V1AsyncTasks, - PathValues.V1_ASYNCTASKS_TASK_ID: V1AsyncTasksTaskId, PathValues.V1_BATCHJOBS: V1BatchJobs, PathValues.V1_BATCHJOBS_BATCH_JOB_ID: V1BatchJobsBatchJobId, + PathValues.V1_DOCKERIMAGEBATCHJOBS: V1DockerImageBatchJobs, + PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID: V1DockerImageBatchJobsBatchJobId, + PathValues.V1_ASYNCTASKS: V1AsyncTasks, + PathValues.V1_ASYNCTASKS_TASK_ID: V1AsyncTasksTaskId, + PathValues.V1_SYNCTASKS: V1SyncTasks, + PathValues.V1_STREAMINGTASKS: V1StreamingTasks, + PathValues.V1_MODELBUNDLES: V1ModelBundles, + PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES: V1ModelBundlesCloneWithChanges, + PathValues.V1_MODELBUNDLES_LATEST: V1ModelBundlesLatest, + PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID: V1ModelBundlesModelBundleId, + PathValues.V2_MODELBUNDLES: V2ModelBundles, + 
PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES: V2ModelBundlesCloneWithChanges, + PathValues.V2_MODELBUNDLES_LATEST: V2ModelBundlesLatest, + PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID: V2ModelBundlesModelBundleId, + PathValues.V1_MODELENDPOINTS: V1ModelEndpoints, + PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID: V1ModelEndpointsModelEndpointId, + PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART: V1ModelEndpointsModelEndpointIdRestart, + PathValues.V1_MODELENDPOINTSSCHEMA_JSON: V1ModelEndpointsSchemaJson, + PathValues.V1_MODELENDPOINTSAPI: V1ModelEndpointsApi, PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES: V1DockerImageBatchJobBundles, PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST: V1DockerImageBatchJobBundlesLatest, PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID: V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId, - PathValues.V1_DOCKERIMAGEBATCHJOBS: V1DockerImageBatchJobs, - PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID: V1DockerImageBatchJobsBatchJobId, - PathValues.V1_FILES: V1Files, - PathValues.V1_FILES_FILE_ID: V1FilesFileId, - PathValues.V1_FILES_FILE_ID_CONTENT: V1FilesFileIdContent, - PathValues.V1_LLM_BATCHCOMPLETIONS: V1LlmBatchCompletions, - PathValues.V1_LLM_COMPLETIONSSTREAM: V1LlmCompletionsStream, + PathValues.V1_LLM_MODELENDPOINTS: V1LlmModelEndpoints, + PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME: V1LlmModelEndpointsModelEndpointName, PathValues.V1_LLM_COMPLETIONSSYNC: V1LlmCompletionsSync, + PathValues.V1_LLM_COMPLETIONSSTREAM: V1LlmCompletionsStream, PathValues.V1_LLM_FINETUNES: V1LlmFineTunes, PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID: V1LlmFineTunesFineTuneId, PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL: V1LlmFineTunesFineTuneIdCancel, PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS: V1LlmFineTunesFineTuneIdEvents, - PathValues.V1_LLM_MODELENDPOINTS: V1LlmModelEndpoints, PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD: V1LlmModelEndpointsDownload, - PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME: 
V1LlmModelEndpointsModelEndpointName, - PathValues.V1_MODELBUNDLES: V1ModelBundles, - PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES: V1ModelBundlesCloneWithChanges, - PathValues.V1_MODELBUNDLES_LATEST: V1ModelBundlesLatest, - PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID: V1ModelBundlesModelBundleId, - PathValues.V1_MODELENDPOINTS: V1ModelEndpoints, - PathValues.V1_MODELENDPOINTSAPI: V1ModelEndpointsApi, - PathValues.V1_MODELENDPOINTSSCHEMA_JSON: V1ModelEndpointsSchemaJson, - PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID: V1ModelEndpointsModelEndpointId, - PathValues.V1_STREAMINGTASKS: V1StreamingTasks, - PathValues.V1_SYNCTASKS: V1SyncTasks, + PathValues.V1_LLM_BATCHCOMPLETIONS: V1LlmBatchCompletions, + PathValues.V1_FILES: V1Files, + PathValues.V1_FILES_FILE_ID: V1FilesFileId, + PathValues.V1_FILES_FILE_ID_CONTENT: V1FilesFileIdContent, PathValues.V1_TRIGGERS: V1Triggers, PathValues.V1_TRIGGERS_TRIGGER_ID: V1TriggersTriggerId, - PathValues.V2_MODELBUNDLES: V2ModelBundles, - PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES: V2ModelBundlesCloneWithChanges, - PathValues.V2_MODELBUNDLES_LATEST: V2ModelBundlesLatest, - PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID: V2ModelBundlesModelBundleId, + PathValues.V2_BATCHCOMPLETIONS: V2BatchCompletions, + PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID: V2BatchCompletionsBatchCompletionId, + PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL: V2BatchCompletionsBatchCompletionIdActionsCancel, + PathValues.V2_CHAT_COMPLETIONS: V2ChatCompletions, + PathValues.V2_COMPLETIONS: V2Completions, + PathValues.HEALTHCHECK: Healthcheck, + PathValues.HEALTHZ: Healthz, + PathValues.READYZ: Readyz, } ) diff --git a/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id_restart.py b/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id_restart.py new file mode 100644 index 00000000..a0a147c8 --- /dev/null +++ b/launch/api_client/apis/paths/v1_model_endpoints_model_endpoint_id_restart.py @@ -0,0 +1,9 @@ +from 
launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart.post import ( + ApiForpost, +) + + +class V1ModelEndpointsModelEndpointIdRestart( + ApiForpost, +): + pass diff --git a/launch/api_client/apis/paths/v2_batch_completions.py b/launch/api_client/apis/paths/v2_batch_completions.py new file mode 100644 index 00000000..bd23de97 --- /dev/null +++ b/launch/api_client/apis/paths/v2_batch_completions.py @@ -0,0 +1,7 @@ +from launch.api_client.paths.v2_batch_completions.post import ApiForpost + + +class V2BatchCompletions( + ApiForpost, +): + pass diff --git a/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id.py b/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id.py new file mode 100644 index 00000000..b2d71d04 --- /dev/null +++ b/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id.py @@ -0,0 +1,13 @@ +from launch.api_client.paths.v2_batch_completions_batch_completion_id.get import ( + ApiForget, +) +from launch.api_client.paths.v2_batch_completions_batch_completion_id.post import ( + ApiForpost, +) + + +class V2BatchCompletionsBatchCompletionId( + ApiForget, + ApiForpost, +): + pass diff --git a/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id_actions_cancel.py b/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id_actions_cancel.py new file mode 100644 index 00000000..0fc54183 --- /dev/null +++ b/launch/api_client/apis/paths/v2_batch_completions_batch_completion_id_actions_cancel.py @@ -0,0 +1,9 @@ +from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel.post import ( + ApiForpost, +) + + +class V2BatchCompletionsBatchCompletionIdActionsCancel( + ApiForpost, +): + pass diff --git a/launch/api_client/apis/paths/v2_chat_completions.py b/launch/api_client/apis/paths/v2_chat_completions.py new file mode 100644 index 00000000..72995ebd --- /dev/null +++ b/launch/api_client/apis/paths/v2_chat_completions.py @@ -0,0 +1,7 @@ +from 
launch.api_client.paths.v2_chat_completions.post import ApiForpost + + +class V2ChatCompletions( + ApiForpost, +): + pass diff --git a/launch/api_client/apis/paths/v2_completions.py b/launch/api_client/apis/paths/v2_completions.py new file mode 100644 index 00000000..a2189e8c --- /dev/null +++ b/launch/api_client/apis/paths/v2_completions.py @@ -0,0 +1,7 @@ +from launch.api_client.paths.v2_completions.post import ApiForpost + + +class V2Completions( + ApiForpost, +): + pass diff --git a/launch/api_client/apis/tag_to_api.py b/launch/api_client/apis/tag_to_api.py index 8b3c0d6d..f2a2dfcf 100644 --- a/launch/api_client/apis/tag_to_api.py +++ b/launch/api_client/apis/tag_to_api.py @@ -4,10 +4,10 @@ from launch.api_client.apis.tags.default_api import DefaultApi TagToApi = typing_extensions.TypedDict( - "TagToApi", + 'TagToApi', { TagValues.DEFAULT: DefaultApi, - }, + } ) tag_to_api = TagToApi( diff --git a/launch/api_client/apis/tags/default_api.py b/launch/api_client/apis/tags/default_api.py index 3b843ffe..01235de7 100644 --- a/launch/api_client/apis/tags/default_api.py +++ b/launch/api_client/apis/tags/default_api.py @@ -137,6 +137,9 @@ from launch.api_client.paths.v1_model_endpoints_model_endpoint_id.put import ( UpdateModelEndpointV1ModelEndpointsModelEndpointIdPut, ) +from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart.post import ( + RestartModelEndpointV1ModelEndpointsModelEndpointIdRestartPost, +) from launch.api_client.paths.v1_model_endpoints_schema_json.get import ( GetModelEndpointsSchemaV1ModelEndpointsSchemaJsonGet, ) @@ -159,6 +162,24 @@ from launch.api_client.paths.v1_triggers_trigger_id.put import ( UpdateTriggerV1TriggersTriggerIdPut, ) +from launch.api_client.paths.v2_batch_completions.post import ( + BatchCompletionsV2BatchCompletionsPost, +) +from launch.api_client.paths.v2_batch_completions_batch_completion_id.get import ( + GetBatchCompletionV2BatchCompletionsBatchCompletionIdGet, +) +from 
launch.api_client.paths.v2_batch_completions_batch_completion_id.post import ( + UpdateBatchCompletionV2BatchCompletionsBatchCompletionIdPost, +) +from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel.post import ( + CancelBatchCompletionV2BatchCompletionsBatchCompletionIdActionsCancelPost, +) +from launch.api_client.paths.v2_chat_completions.post import ( + ChatCompletionV2ChatCompletionsPost, +) +from launch.api_client.paths.v2_completions.post import ( + CompletionV2CompletionsPost, +) from launch.api_client.paths.v2_model_bundles.get import ( ListModelBundlesV2ModelBundlesGet, ) @@ -177,9 +198,13 @@ class DefaultApi( + BatchCompletionsV2BatchCompletionsPost, + CancelBatchCompletionV2BatchCompletionsBatchCompletionIdActionsCancelPost, CancelFineTuneV1LlmFineTunesFineTuneIdCancelPut, + ChatCompletionV2ChatCompletionsPost, CloneModelBundleWithChangesV1ModelBundlesCloneWithChangesPost, CloneModelBundleWithChangesV2ModelBundlesCloneWithChangesPost, + CompletionV2CompletionsPost, CreateAsyncInferenceTaskV1AsyncTasksPost, CreateBatchCompletionsV1LlmBatchCompletionsPost, CreateBatchJobV1BatchJobsPost, @@ -201,6 +226,7 @@ class DefaultApi( DeleteTriggerV1TriggersTriggerIdDelete, DownloadModelEndpointV1LlmModelEndpointsDownloadPost, GetAsyncInferenceTaskV1AsyncTasksTaskIdGet, + GetBatchCompletionV2BatchCompletionsBatchCompletionIdGet, GetBatchJobV1BatchJobsBatchJobIdGet, GetDockerImageBatchJobModelBundleV1DockerImageBatchJobBundlesDockerImageBatchJobBundleIdGet, GetDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdGet, @@ -230,6 +256,8 @@ class DefaultApi( ListModelEndpointsV1LlmModelEndpointsGet, ListModelEndpointsV1ModelEndpointsGet, ListTriggersV1TriggersGet, + RestartModelEndpointV1ModelEndpointsModelEndpointIdRestartPost, + UpdateBatchCompletionV2BatchCompletionsBatchCompletionIdPost, UpdateBatchJobV1BatchJobsBatchJobIdPut, UpdateDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdPut, 
UpdateModelEndpointV1LlmModelEndpointsModelEndpointNamePut, @@ -242,5 +270,4 @@ class DefaultApi( Do not edit the class manually. """ - pass diff --git a/launch/api_client/configuration.py b/launch/api_client/configuration.py index ca4f7085..bf5aed0d 100644 --- a/launch/api_client/configuration.py +++ b/launch/api_client/configuration.py @@ -20,88 +20,78 @@ from launch.api_client.exceptions import ApiValueError JSON_SCHEMA_VALIDATION_KEYWORDS = { - "multipleOf", - "maximum", - "exclusiveMaximum", - "minimum", - "exclusiveMinimum", - "maxLength", - "minLength", - "pattern", - "maxItems", - "minItems", - "uniqueItems", - "maxProperties", - "minProperties", + 'multipleOf', 'maximum', 'exclusiveMaximum', + 'minimum', 'exclusiveMinimum', 'maxLength', + 'minLength', 'pattern', 'maxItems', 'minItems', + 'uniqueItems', 'maxProperties', 'minProperties', } - class Configuration(object): """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - Do not edit the class manually. - - :param host: Base url - :param api_key: Dict to store API key(s). - Each entry in the dict specifies an API key. - The dict key is the name of the security scheme in the OAS specification. - The dict value is the API key secret. - :param api_key_prefix: Dict to store API prefix (e.g. Bearer) - The dict key is the name of the security scheme in the OAS specification. - The dict value is an API key prefix when generating the auth data. - :param username: Username for HTTP basic authentication - :param password: Password for HTTP basic authentication - :param discard_unknown_keys: Boolean value indicating whether to discard - unknown properties. A server may send a response that includes additional - properties that are not known by the client in the following scenarios: - 1. The OpenAPI document is incomplete, i.e. it does not match the server - implementation. - 2. 
The client was generated using an older version of the OpenAPI document - and the server has been upgraded since then. - If a schema in the OpenAPI document defines the additionalProperties attribute, - then all undeclared properties received by the server are injected into the - additional properties map. In that case, there are undeclared properties, and - nothing to discard. - :param disabled_client_side_validations (string): Comma-separated list of - JSON schema validation keywords to disable JSON schema structural validation - rules. The following keywords may be specified: multipleOf, maximum, - exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern, - maxItems, minItems. - By default, the validation is performed for data generated locally by the client - and data received from the server, independent of any validation performed by - the server side. If the input data does not satisfy the JSON schema validation - rules specified in the OpenAPI document, an exception is raised. - If disabled_client_side_validations is set, structural validation is - disabled. This can be useful to troubleshoot data validation problem, such as - when the OpenAPI document validation rules do not match the actual API data - received by the server. - :param server_index: Index to servers configuration. - :param server_variables: Mapping with string values to replace variables in - templated server configuration. The validation of enums is performed for - variables with defined enum values before. - :param server_operation_index: Mapping from operation ID to an index to server - configuration. - :param server_operation_variables: Mapping from operation ID to a mapping with - string values to replace variables in templated server configuration. - The validation of enums is performed for variables with defined enum values before. - - :Example: - - HTTP Basic Authentication Example. 
- Given the following security scheme in the OpenAPI specification: - components: - securitySchemes: - http_basic_auth: - type: http - scheme: basic - - Configure API client with HTTP basic authentication: - - conf = launch.api_client.Configuration( - username='the-user', - password='the-password', - ) + Ref: https://openapi-generator.tech + Do not edit the class manually. + + :param host: Base url + :param api_key: Dict to store API key(s). + Each entry in the dict specifies an API key. + The dict key is the name of the security scheme in the OAS specification. + The dict value is the API key secret. + :param api_key_prefix: Dict to store API prefix (e.g. Bearer) + The dict key is the name of the security scheme in the OAS specification. + The dict value is an API key prefix when generating the auth data. + :param username: Username for HTTP basic authentication + :param password: Password for HTTP basic authentication + :param discard_unknown_keys: Boolean value indicating whether to discard + unknown properties. A server may send a response that includes additional + properties that are not known by the client in the following scenarios: + 1. The OpenAPI document is incomplete, i.e. it does not match the server + implementation. + 2. The client was generated using an older version of the OpenAPI document + and the server has been upgraded since then. + If a schema in the OpenAPI document defines the additionalProperties attribute, + then all undeclared properties received by the server are injected into the + additional properties map. In that case, there are undeclared properties, and + nothing to discard. + :param disabled_client_side_validations (string): Comma-separated list of + JSON schema validation keywords to disable JSON schema structural validation + rules. The following keywords may be specified: multipleOf, maximum, + exclusiveMaximum, minimum, exclusiveMinimum, maxLength, minLength, pattern, + maxItems, minItems. 
+ By default, the validation is performed for data generated locally by the client + and data received from the server, independent of any validation performed by + the server side. If the input data does not satisfy the JSON schema validation + rules specified in the OpenAPI document, an exception is raised. + If disabled_client_side_validations is set, structural validation is + disabled. This can be useful to troubleshoot data validation problem, such as + when the OpenAPI document validation rules do not match the actual API data + received by the server. + :param server_index: Index to servers configuration. + :param server_variables: Mapping with string values to replace variables in + templated server configuration. The validation of enums is performed for + variables with defined enum values before. + :param server_operation_index: Mapping from operation ID to an index to server + configuration. + :param server_operation_variables: Mapping from operation ID to a mapping with + string values to replace variables in templated server configuration. + The validation of enums is performed for variables with defined enum values before. + + :Example: + + HTTP Basic Authentication Example. 
+ Given the following security scheme in the OpenAPI specification: + components: + securitySchemes: + http_basic_auth: + type: http + scheme: basic + + Configure API client with HTTP basic authentication: + +conf = launch.api_client.Configuration( + username='the-user', + password='the-password', +) """ @@ -118,8 +108,10 @@ def __init__( server_variables=None, server_operation_index=None, server_operation_variables=None, + access_token=None, ): - """Constructor""" + """Constructor + """ self._base_path = "http://localhost" if host is None else host """Default Base url """ @@ -143,12 +135,15 @@ def __init__( """ self.discard_unknown_keys = discard_unknown_keys self.disabled_client_side_validations = disabled_client_side_validations + self.access_token = None + """access token for OAuth/Bearer + """ self.logger = {} """Logging Settings """ self.logger["package_logger"] = logging.getLogger("launch.api_client") self.logger["urllib3_logger"] = logging.getLogger("urllib3") - self.logger_format = "%(asctime)s %(levelname)s %(message)s" + self.logger_format = '%(asctime)s %(levelname)s %(message)s' """Log format """ self.logger_stream_handler = None @@ -196,7 +191,7 @@ def __init__( self.proxy_headers = None """Proxy headers """ - self.safe_chars_for_path_param = "" + self.safe_chars_for_path_param = '' """Safe chars for path_param """ self.retries = None @@ -213,7 +208,7 @@ def __deepcopy__(self, memo): result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): - if k not in ("logger", "logger_file_handler"): + if k not in ('logger', 'logger_file_handler'): setattr(result, k, copy.deepcopy(v, memo)) # shallow copy of loggers result.logger = copy.copy(self.logger) @@ -224,11 +219,12 @@ def __deepcopy__(self, memo): def __setattr__(self, name, value): object.__setattr__(self, name, value) - if name == "disabled_client_side_validations": - s = set(filter(None, value.split(","))) + if name == 'disabled_client_side_validations': + s = 
set(filter(None, value.split(','))) for v in s: if v not in JSON_SCHEMA_VALIDATION_KEYWORDS: - raise ApiValueError("Invalid keyword: '{0}''".format(v)) + raise ApiValueError( + "Invalid keyword: '{0}''".format(v)) self._disabled_client_side_validations = s @classmethod @@ -369,7 +365,9 @@ def get_basic_auth_token(self): password = "" if self.password is not None: password = self.password - return urllib3.util.make_headers(basic_auth=username + ":" + password).get("authorization") + return urllib3.util.make_headers( + basic_auth=username + ':' + password + ).get('authorization') def auth_settings(self): """Gets Auth Settings dict for api client. @@ -378,11 +376,18 @@ def auth_settings(self): """ auth = {} if self.username is not None and self.password is not None: - auth["HTTPBasic"] = { - "type": "basic", - "in": "header", - "key": "Authorization", - "value": self.get_basic_auth_token(), + auth['HTTPBasic'] = { + 'type': 'basic', + 'in': 'header', + 'key': 'Authorization', + 'value': self.get_basic_auth_token() + } + if self.access_token is not None: + auth['OAuth2PasswordBearer'] = { + 'type': 'oauth2', + 'in': 'header', + 'key': 'Authorization', + 'value': 'Bearer ' + self.access_token } return auth @@ -391,13 +396,12 @@ def to_debug_report(self): :return: The report for debugging. 
""" - return ( - "Python SDK Debug Report:\n" - "OS: {env}\n" - "Python Version: {pyversion}\n" - "Version of the API: 1.0.0\n" - "SDK Package Version: 1.1.2".format(env=sys.platform, pyversion=sys.version) - ) + return "Python SDK Debug Report:\n"\ + "OS: {env}\n"\ + "Python Version: {pyversion}\n"\ + "Version of the API: 1.0.0\n"\ + "SDK Package Version: 1.0.0".\ + format(env=sys.platform, pyversion=sys.version) def get_host_settings(self): """Gets an array of host settings @@ -406,8 +410,8 @@ def get_host_settings(self): """ return [ { - "url": "", - "description": "No description provided", + 'url': "", + 'description': "No description provided", } ] @@ -429,20 +433,22 @@ def get_host_from_settings(self, index, variables=None, servers=None): except IndexError: raise ValueError( "Invalid index {0} when selecting the host settings. " - "Must be less than {1}".format(index, len(servers)) - ) + "Must be less than {1}".format(index, len(servers))) - url = server["url"] + url = server['url'] # go through variables and replace placeholders - for variable_name, variable in server.get("variables", {}).items(): - used_value = variables.get(variable_name, variable["default_value"]) + for variable_name, variable in server.get('variables', {}).items(): + used_value = variables.get( + variable_name, variable['default_value']) - if "enum_values" in variable and used_value not in variable["enum_values"]: + if 'enum_values' in variable \ + and used_value not in variable['enum_values']: raise ValueError( "The variable `{0}` in the host URL has invalid value " - "{1}. Must be {2}.".format(variable_name, variables[variable_name], variable["enum_values"]) - ) + "{1}. 
Must be {2}.".format( + variable_name, variables[variable_name], + variable['enum_values'])) url = url.replace("{" + variable_name + "}", used_value) diff --git a/launch/api_client/exceptions.py b/launch/api_client/exceptions.py index f461e207..46687131 100644 --- a/launch/api_client/exceptions.py +++ b/launch/api_client/exceptions.py @@ -19,8 +19,9 @@ class OpenApiException(Exception): class ApiTypeError(OpenApiException, TypeError): - def __init__(self, msg, path_to_item=None, valid_classes=None, key_type=None): - """Raises an exception for TypeErrors + def __init__(self, msg, path_to_item=None, valid_classes=None, + key_type=None): + """ Raises an exception for TypeErrors Args: msg (str): the exception message @@ -123,9 +124,11 @@ def headers(self) -> typing.Optional[HTTPHeaderDict]: def __str__(self): """Custom error messages for exception""" - error_message = "({0})\n" "Reason: {1}\n".format(self.status, self.reason) + error_message = "({0})\n"\ + "Reason: {1}\n".format(self.status, self.reason) if self.headers: - error_message += "HTTP response headers: {0}\n".format(self.headers) + error_message += "HTTP response headers: {0}\n".format( + self.headers) if self.body: error_message += "HTTP response body: {0}\n".format(self.body) diff --git a/launch/api_client/model/annotation.py b/launch/api_client/model/annotation.py new file mode 100644 index 00000000..cd68a7ec --- /dev/null +++ b/launch/api_client/model/annotation.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # 
noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Annotation( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "type", + "url_citation", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "url_citation": "URL_CITATION", + } + + @schemas.classproperty + def URL_CITATION(cls): + return cls("url_citation") + + @staticmethod + def url_citation() -> typing.Type['UrlCitation']: + return UrlCitation + __annotations__ = { + "type": type, + "url_citation": url_citation, + } + + type: MetaOapg.properties.type + url_citation: 'UrlCitation' + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["url_citation"]) -> 'UrlCitation': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "url_citation", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["url_citation"]) -> 'UrlCitation': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "url_citation", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + type: typing.Union[MetaOapg.properties.type, str, ], + url_citation: 'UrlCitation', + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Annotation': + return super().__new__( + cls, + *_args, + type=type, + url_citation=url_citation, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.url_citation import UrlCitation diff --git a/launch/api_client/model/create_fine_tune_response.pyi b/launch/api_client/model/audio.py similarity index 64% rename from launch/api_client/model/create_fine_tune_response.pyi rename to launch/api_client/model/audio.py index 1147b6d2..1fc9d562 100644 --- a/launch/api_client/model/create_fine_tune_response.pyi +++ b/launch/api_client/model/audio.py @@ -19,80 +19,61 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class CreateFineTuneResponse(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class Audio( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "id", } - + class properties: id = schemas.StrSchema __annotations__ = { "id": id, } + id: MetaOapg.properties.id - + @typing.overload def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + id: typing.Union[MetaOapg.properties.id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Audio': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/audio1.py b/launch/api_client/model/audio1.py new file mode 100644 index 00000000..8d5641c9 --- /dev/null +++ b/launch/api_client/model/audio1.py @@ -0,0 +1,119 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 
+import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Audio1( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "expires_at", + "transcript", + "data", + "id", + } + + class properties: + id = schemas.StrSchema + expires_at = schemas.IntSchema + data = schemas.StrSchema + transcript = schemas.StrSchema + __annotations__ = { + "id": id, + "expires_at": expires_at, + "data": data, + "transcript": transcript, + } + + expires_at: MetaOapg.properties.expires_at + transcript: MetaOapg.properties.transcript + data: MetaOapg.properties.data + id: MetaOapg.properties.id + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["transcript"]) -> MetaOapg.properties.transcript: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "expires_at", "data", "transcript", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["transcript"]) -> MetaOapg.properties.transcript: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "expires_at", "data", "transcript", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + expires_at: typing.Union[MetaOapg.properties.expires_at, decimal.Decimal, int, ], + transcript: typing.Union[MetaOapg.properties.transcript, str, ], + data: typing.Union[MetaOapg.properties.data, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Audio1': + return super().__new__( + cls, + *_args, + expires_at=expires_at, + transcript=transcript, + data=data, + id=id, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/audio2.py b/launch/api_client/model/audio2.py new file mode 100644 index 00000000..c9552fcf --- /dev/null +++ b/launch/api_client/model/audio2.py @@ -0,0 +1,140 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import 
typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Audio2( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "voice", + "format", + } + + class properties: + + @staticmethod + def voice() -> typing.Type['VoiceIdsShared']: + return VoiceIdsShared + + + class format( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "wav": "WAV", + "aac": "AAC", + "mp3": "MP3", + "flac": "FLAC", + "opus": "OPUS", + "pcm16": "PCM16", + } + + @schemas.classproperty + def WAV(cls): + return cls("wav") + + @schemas.classproperty + def AAC(cls): + return cls("aac") + + @schemas.classproperty + def MP3(cls): + return cls("mp3") + + @schemas.classproperty + def FLAC(cls): + return cls("flac") + + @schemas.classproperty + def OPUS(cls): + return cls("opus") + + @schemas.classproperty + def PCM16(cls): + return cls("pcm16") + __annotations__ = { + "voice": voice, + "format": format, + } + + voice: 'VoiceIdsShared' + format: MetaOapg.properties.format + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["voice"]) -> 'VoiceIdsShared': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["voice", "format", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["voice"]) -> 'VoiceIdsShared': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... 
+ + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["voice", "format", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + voice: 'VoiceIdsShared', + format: typing.Union[MetaOapg.properties.format, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Audio2': + return super().__new__( + cls, + *_args, + voice=voice, + format=format, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.voice_ids_shared import VoiceIdsShared diff --git a/launch/api_client/model/batch_completions_job.py b/launch/api_client/model/batch_completions_job.py new file mode 100644 index 00000000..6423df74 --- /dev/null +++ b/launch/api_client/model/batch_completions_job.py @@ -0,0 +1,289 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class BatchCompletionsJob( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "completed_at", + "metadata", + "expires_at", + "model_config", + "job_id", + "created_at", + "output_data_path", + "status", + } + + class properties: + job_id = schemas.StrSchema + output_data_path = schemas.StrSchema + + @staticmethod + def model_config() -> typing.Type['BatchCompletionsModelConfig']: + return BatchCompletionsModelConfig + + @staticmethod + def status() -> typing.Type['BatchCompletionsJobStatus']: + return BatchCompletionsJobStatus + created_at = schemas.StrSchema + expires_at = schemas.StrSchema + + + class completed_at( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'completed_at': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class input_data_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'input_data_path': + return super().__new__( + 
cls, + *_args, + _configuration=_configuration, + ) + + + class priority( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "job_id": job_id, + "output_data_path": output_data_path, + "model_config": model_config, + "status": status, + "created_at": created_at, + "expires_at": expires_at, + "completed_at": completed_at, + "metadata": metadata, + "input_data_path": input_data_path, + "priority": priority, + } + + completed_at: MetaOapg.properties.completed_at + metadata: MetaOapg.properties.metadata + expires_at: MetaOapg.properties.expires_at + model_config: 'BatchCompletionsModelConfig' + job_id: MetaOapg.properties.job_id + created_at: MetaOapg.properties.created_at + output_data_path: MetaOapg.properties.output_data_path + status: 'BatchCompletionsJobStatus' + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "input_data_path", "priority", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "input_data_path", "priority", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + completed_at: typing.Union[MetaOapg.properties.completed_at, None, str, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, ], + expires_at: typing.Union[MetaOapg.properties.expires_at, str, ], + model_config: 'BatchCompletionsModelConfig', + job_id: typing.Union[MetaOapg.properties.job_id, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, ], + output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], + status: 'BatchCompletionsJobStatus', + input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, + priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'BatchCompletionsJob': + return super().__new__( + cls, + *_args, + completed_at=completed_at, + metadata=metadata, + expires_at=expires_at, + model_config=model_config, + job_id=job_id, + created_at=created_at, + 
# NOTE(review): reconstructed from a whitespace-mangled diff hunk; the tokens
# below match the generated (post-patch) content of
# launch/api_client/model/batch_completions_job_status.py.
class BatchCompletionsJobStatus(
    schemas.EnumBase,
    schemas.StrSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    String-enum schema for the status of a batch completions job; the valid
    wire values are the lowercase keys of ``MetaOapg.enum_value_to_name``.
    """

    class MetaOapg:
        # Maps each raw wire value (the string the API sends/accepts) to the
        # name of the matching class-level accessor defined below.
        enum_value_to_name = {
            "queued": "QUEUED",
            "running": "RUNNING",
            "completed": "COMPLETED",
            "failed": "FAILED",
            "cancelled": "CANCELLED",
            "unknown": "UNKNOWN",
        }

    # Each accessor constructs an instance of this schema wrapping the
    # lowercase wire value, so callers can write
    # BatchCompletionsJobStatus.QUEUED instead of spelling the raw string.
    @schemas.classproperty
    def QUEUED(cls):
        return cls("queued")

    @schemas.classproperty
    def RUNNING(cls):
        return cls("running")

    @schemas.classproperty
    def COMPLETED(cls):
        return cls("completed")

    @schemas.classproperty
    def FAILED(cls):
        return cls("failed")

    @schemas.classproperty
    def CANCELLED(cls):
        return cls("cancelled")

    @schemas.classproperty
    def UNKNOWN(cls):
        return cls("unknown")
+ """ + + + class MetaOapg: + required = { + "model", + } + + class properties: + model = schemas.StrSchema + + + class max_model_len( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_model_len': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_seqs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_seqs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enforce_eager( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enforce_eager': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class trust_remote_code( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'trust_remote_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class pipeline_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'pipeline_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tensor_parallel_size( + schemas.IntBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tensor_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_log_requests( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_log_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tool_call_parser( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_call_parser': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_auto_tool_choice( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_auto_tool_choice': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class config_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'config_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer_mode( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_mode': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class limit_mm_per_prompt( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'limit_mm_per_prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_batched_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_batched_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class code_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'code_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class rope_scaling( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return 
super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'rope_scaling': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class tokenizer_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization_param_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization_param_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_seq_len_to_capture( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_seq_len_to_capture': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_sliding_window( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_sliding_window': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class 
skip_tokenizer_init( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_tokenizer_init': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class served_model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'served_model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class override_neuron_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'override_neuron_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class mm_processor_kwargs( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return 
super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'mm_processor_kwargs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class block_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'block_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class gpu_memory_utilization( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpu_memory_utilization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class swap_space( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'swap_space': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cache_dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) 
-> 'cache_dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_gpu_blocks_override( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_gpu_blocks_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_prefix_caching( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_prefix_caching': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_context_length( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_context_length': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class response_role( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'response_role': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "model": model, + "max_model_len": max_model_len, + "max_num_seqs": max_num_seqs, + "enforce_eager": enforce_eager, + "trust_remote_code": trust_remote_code, + "pipeline_parallel_size": pipeline_parallel_size, + "tensor_parallel_size": tensor_parallel_size, + "quantization": quantization, + "disable_log_requests": disable_log_requests, + "chat_template": chat_template, + "tool_call_parser": tool_call_parser, + "enable_auto_tool_choice": enable_auto_tool_choice, + "load_format": load_format, + "config_format": config_format, + "tokenizer_mode": tokenizer_mode, + "limit_mm_per_prompt": limit_mm_per_prompt, + "max_num_batched_tokens": max_num_batched_tokens, + "tokenizer": tokenizer, + "dtype": dtype, + "seed": seed, + "revision": revision, + "code_revision": code_revision, + "rope_scaling": rope_scaling, + "tokenizer_revision": tokenizer_revision, + "quantization_param_path": quantization_param_path, + "max_seq_len_to_capture": max_seq_len_to_capture, + "disable_sliding_window": disable_sliding_window, + "skip_tokenizer_init": skip_tokenizer_init, + "served_model_name": served_model_name, + "override_neuron_config": override_neuron_config, + "mm_processor_kwargs": mm_processor_kwargs, + "block_size": block_size, + "gpu_memory_utilization": gpu_memory_utilization, + "swap_space": swap_space, + "cache_dtype": cache_dtype, + "num_gpu_blocks_override": num_gpu_blocks_override, + "enable_prefix_caching": enable_prefix_caching, + "checkpoint_path": checkpoint_path, + "num_shards": num_shards, + "max_context_length": max_context_length, + "response_role": response_role, + } + + 
model: MetaOapg.properties.model + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_context_length"]) -> MetaOapg.properties.max_context_length: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["response_role"]) -> MetaOapg.properties.response_role: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_context_length"]) -> typing.Union[MetaOapg.properties.max_context_length, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["response_role"]) -> typing.Union[MetaOapg.properties.response_role, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + model: typing.Union[MetaOapg.properties.model, str, ], + max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, 
None, bool, schemas.Unset] = schemas.unset, + trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, + pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, + disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, None, bool, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, + enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, + load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, + config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, + tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, + limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, + max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, + dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = 
schemas.unset, + code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, + rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + tokenizer_revision: typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, + quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, + max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, + skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, + served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, + override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, + num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enable_prefix_caching: 
typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_context_length: typing.Union[MetaOapg.properties.max_context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + response_role: typing.Union[MetaOapg.properties.response_role, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'BatchCompletionsModelConfig': + return super().__new__( + cls, + *_args, + model=model, + max_model_len=max_model_len, + max_num_seqs=max_num_seqs, + enforce_eager=enforce_eager, + trust_remote_code=trust_remote_code, + pipeline_parallel_size=pipeline_parallel_size, + tensor_parallel_size=tensor_parallel_size, + quantization=quantization, + disable_log_requests=disable_log_requests, + chat_template=chat_template, + tool_call_parser=tool_call_parser, + enable_auto_tool_choice=enable_auto_tool_choice, + load_format=load_format, + config_format=config_format, + tokenizer_mode=tokenizer_mode, + limit_mm_per_prompt=limit_mm_per_prompt, + max_num_batched_tokens=max_num_batched_tokens, + tokenizer=tokenizer, + dtype=dtype, + seed=seed, + revision=revision, + code_revision=code_revision, + rope_scaling=rope_scaling, + tokenizer_revision=tokenizer_revision, + quantization_param_path=quantization_param_path, + max_seq_len_to_capture=max_seq_len_to_capture, + disable_sliding_window=disable_sliding_window, + skip_tokenizer_init=skip_tokenizer_init, + served_model_name=served_model_name, + override_neuron_config=override_neuron_config, + mm_processor_kwargs=mm_processor_kwargs, + 
block_size=block_size, + gpu_memory_utilization=gpu_memory_utilization, + swap_space=swap_space, + cache_dtype=cache_dtype, + num_gpu_blocks_override=num_gpu_blocks_override, + enable_prefix_caching=enable_prefix_caching, + checkpoint_path=checkpoint_path, + num_shards=num_shards, + max_context_length=max_context_length, + response_role=response_role, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/batch_job_serialization_format.py b/launch/api_client/model/batch_job_serialization_format.py index 7bfc3103..7a95ec76 100644 --- a/launch/api_client/model/batch_job_serialization_format.py +++ b/launch/api_client/model/batch_job_serialization_format.py @@ -23,25 +23,27 @@ from launch.api_client import schemas # noqa: F401 -class BatchJobSerializationFormat(schemas.EnumBase, schemas.StrSchema): +class BatchJobSerializationFormat( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. 
""" + class MetaOapg: enum_value_to_name = { "JSON": "JSON", "PICKLE": "PICKLE", } - + @schemas.classproperty def JSON(cls): return cls("JSON") - + @schemas.classproperty def PICKLE(cls): return cls("PICKLE") diff --git a/launch/api_client/model/batch_job_serialization_format.pyi b/launch/api_client/model/batch_job_serialization_format.pyi deleted file mode 100644 index 7ebc5bff..00000000 --- a/launch/api_client/model/batch_job_serialization_format.pyi +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class BatchJobSerializationFormat(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An enumeration. - """ - - @schemas.classproperty - def JSON(cls): - return cls("JSON") - @schemas.classproperty - def PICKLE(cls): - return cls("PICKLE") diff --git a/launch/api_client/model/batch_job_status.py b/launch/api_client/model/batch_job_status.py index 1d4c7db3..01a30c4b 100644 --- a/launch/api_client/model/batch_job_status.py +++ b/launch/api_client/model/batch_job_status.py @@ -23,15 +23,17 @@ from launch.api_client import schemas # noqa: F401 -class BatchJobStatus(schemas.EnumBase, schemas.StrSchema): +class BatchJobStatus( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. """ + class MetaOapg: enum_value_to_name = { "PENDING": "PENDING", @@ -42,31 +44,31 @@ class MetaOapg: "UNDEFINED": "UNDEFINED", "TIMEOUT": "TIMEOUT", } - + @schemas.classproperty def PENDING(cls): return cls("PENDING") - + @schemas.classproperty def RUNNING(cls): return cls("RUNNING") - + @schemas.classproperty def SUCCESS(cls): return cls("SUCCESS") - + @schemas.classproperty def FAILURE(cls): return cls("FAILURE") - + @schemas.classproperty def CANCELLED(cls): return cls("CANCELLED") - + @schemas.classproperty def UNDEFINED(cls): return cls("UNDEFINED") - + @schemas.classproperty def TIMEOUT(cls): return cls("TIMEOUT") diff --git a/launch/api_client/model/body_upload_file_v1_files_post.py b/launch/api_client/model/body_upload_file_v1_files_post.py index c9787a4b..d0359487 100644 --- a/launch/api_client/model/body_upload_file_v1_files_post.py +++ b/launch/api_client/model/body_upload_file_v1_files_post.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class BodyUploadFileV1FilesPost(schemas.DictSchema): +class BodyUploadFileV1FilesPost( + schemas.AnyTypeSchema, +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -36,7 +38,58 @@ class MetaOapg: } class properties: - file = schemas.BinarySchema + class file( + schemas.BinaryBase, + schemas.AnyTypeSchema, + ): + class MetaOapg: + format = "binary" + + def __new__( + cls, + *_args: typing.Union[ + dict, + frozendict.frozendict, + str, + date, + datetime, + uuid.UUID, + int, + float, + decimal.Decimal, + bool, + None, + list, + tuple, + bytes, + io.FileIO, + io.BufferedReader, + ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[ + schemas.AnyTypeSchema, + dict, + frozendict.frozendict, + str, + date, + datetime, + uuid.UUID, + int, + float, + decimal.Decimal, + None, + list, + tuple, + bytes, + ], + ) -> "file": + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + __annotations__ = { "file": file, } @@ -54,7 +107,9 @@ def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: def __getitem__( self, name: typing.Union[ - typing_extensions.Literal["file",], + typing_extensions.Literal[ + "file", + ], str, ], ): @@ -72,7 +127,9 @@ def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, s def get_item_oapg( self, name: typing.Union[ - typing_extensions.Literal["file",], + typing_extensions.Literal[ + "file", + ], str, ], ): @@ -83,9 +140,36 @@ def __new__( *_args: typing.Union[ dict, frozendict.frozendict, + str, + date, + datetime, + uuid.UUID, + int, + float, + decimal.Decimal, + bool, + None, + list, + tuple, + bytes, + io.FileIO, + io.BufferedReader, ], file: typing.Union[ MetaOapg.properties.file, + dict, + frozendict.frozendict, + str, + date, + datetime, + uuid.UUID, + int, + float, + decimal.Decimal, + bool, + None, + list, + tuple, bytes, io.FileIO, io.BufferedReader, diff --git a/launch/api_client/model/body_upload_file_v1_files_post.pyi b/launch/api_client/model/body_upload_file_v1_files_post.pyi deleted file mode 100644 index 73df09a0..00000000 --- 
a/launch/api_client/model/body_upload_file_v1_files_post.pyi +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class BodyUploadFileV1FilesPost(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "file", - } - - class properties: - file = schemas.BinarySchema - __annotations__ = { - "file": file, - } - file: MetaOapg.properties.file - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["file"]) -> MetaOapg.properties.file: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["file",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["file"]) -> MetaOapg.properties.file: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["file",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - file: typing.Union[ - MetaOapg.properties.file, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "BodyUploadFileV1FilesPost": - return super().__new__( - cls, - *_args, - file=file, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/callback_auth.py b/launch/api_client/model/callback_auth.py index 493310d9..558e75d3 100644 --- a/launch/api_client/model/callback_auth.py +++ b/launch/api_client/model/callback_auth.py @@ -32,18 +32,20 @@ class CallbackAuth( Do not edit the class manually. 
""" + class MetaOapg: + @staticmethod def discriminator(): return { - "kind": { - "CallbackBasicAuth": CallbackBasicAuth, - "CallbackmTLSAuth": CallbackmTLSAuth, - "basic": CallbackBasicAuth, - "mtls": CallbackmTLSAuth, + 'kind': { + 'CallbackBasicAuth': CallbackBasicAuth, + 'CallbackmTLSAuth': CallbackmTLSAuth, + 'basic': CallbackBasicAuth, + 'mtls': CallbackmTLSAuth, } } - + @classmethod @functools.lru_cache() def one_of(cls): @@ -59,44 +61,13 @@ def one_of(cls): CallbackmTLSAuth, ] + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CallbackAuth": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CallbackAuth': return super().__new__( cls, *_args, @@ -104,6 +75,5 @@ def __new__( **kwargs, ) - from launch.api_client.model.callback_basic_auth import CallbackBasicAuth from launch.api_client.model.callbackm_tls_auth import CallbackmTLSAuth diff --git a/launch/api_client/model/callback_basic_auth.py b/launch/api_client/model/callback_basic_auth.py index 7e2027d6..eec2406c 100644 --- a/launch/api_client/model/callback_basic_auth.py +++ b/launch/api_client/model/callback_basic_auth.py @@ -23,138 +23,94 @@ from launch.api_client import schemas # noqa: F401 -class CallbackBasicAuth(schemas.DictSchema): +class CallbackBasicAuth( + 
schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "password", "kind", "username", } - + class properties: - class kind(schemas.EnumBase, schemas.StrSchema): + + + class kind( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { "basic": "BASIC", } - + @schemas.classproperty def BASIC(cls): return cls("basic") - - password = schemas.StrSchema username = schemas.StrSchema + password = schemas.StrSchema __annotations__ = { "kind": kind, - "password": password, "username": username, + "password": password, } - + password: MetaOapg.properties.password kind: MetaOapg.properties.kind username: MetaOapg.properties.username - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: - ... - + def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: - ... - + def __getitem__(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: - ... - + def __getitem__(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "kind", - "password", - "username", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["kind", "username", "password", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "kind", - "password", - "username", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["kind", "username", "password", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - password: typing.Union[ - MetaOapg.properties.password, - str, - ], - kind: typing.Union[ - MetaOapg.properties.kind, - str, - ], - username: typing.Union[ - MetaOapg.properties.username, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + password: typing.Union[MetaOapg.properties.password, str, ], + kind: typing.Union[MetaOapg.properties.kind, str, ], + username: typing.Union[MetaOapg.properties.username, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CallbackBasicAuth": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CallbackBasicAuth': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/callback_basic_auth.pyi b/launch/api_client/model/callback_basic_auth.pyi deleted file mode 100644 index 39f43b29..00000000 --- a/launch/api_client/model/callback_basic_auth.pyi +++ /dev/null @@ -1,139 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client 
import schemas # noqa: F401 - -class CallbackBasicAuth(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "password", - "kind", - "username", - } - - class properties: - class kind(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def BASIC(cls): - return cls("basic") - password = schemas.StrSchema - username = schemas.StrSchema - __annotations__ = { - "kind": kind, - "password": password, - "username": username, - } - password: MetaOapg.properties.password - kind: MetaOapg.properties.kind - username: MetaOapg.properties.username - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "kind", - "password", - "username", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["password"]) -> MetaOapg.properties.password: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["username"]) -> MetaOapg.properties.username: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "kind", - "password", - "username", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - password: typing.Union[ - MetaOapg.properties.password, - str, - ], - kind: typing.Union[ - MetaOapg.properties.kind, - str, - ], - username: typing.Union[ - MetaOapg.properties.username, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CallbackBasicAuth": - return super().__new__( - cls, - *_args, - password=password, - kind=kind, - username=username, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/callbackm_tls_auth.py b/launch/api_client/model/callbackm_tls_auth.py index c153d503..2c1300cd 100644 --- a/launch/api_client/model/callbackm_tls_auth.py +++ b/launch/api_client/model/callbackm_tls_auth.py @@ -23,139 +23,94 @@ from launch.api_client import schemas # noqa: F401 -class CallbackmTLSAuth(schemas.DictSchema): +class CallbackmTLSAuth( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "kind", "cert", "key", } - + class properties: - cert = schemas.StrSchema - key = schemas.StrSchema - - class kind(schemas.EnumBase, schemas.StrSchema): + + + class kind( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { "mtls": "MTLS", } - + @schemas.classproperty def MTLS(cls): return cls("mtls") - + cert = schemas.StrSchema + key = schemas.StrSchema __annotations__ = { + "kind": kind, "cert": cert, "key": key, - "kind": kind, } - + kind: MetaOapg.properties.kind cert: MetaOapg.properties.cert key: MetaOapg.properties.key - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: - ... - + def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: - ... - + def __getitem__(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: - ... - + def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cert", - "key", - "kind", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["kind", "cert", "key", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cert", - "key", - "kind", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["kind", "cert", "key", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - kind: typing.Union[ - MetaOapg.properties.kind, - str, - ], - cert: typing.Union[ - MetaOapg.properties.cert, - str, - ], - key: typing.Union[ - MetaOapg.properties.key, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + kind: typing.Union[MetaOapg.properties.kind, str, ], + cert: typing.Union[MetaOapg.properties.cert, str, ], + key: typing.Union[MetaOapg.properties.key, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CallbackmTLSAuth": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CallbackmTLSAuth': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/callbackm_tls_auth.pyi 
b/launch/api_client/model/callbackm_tls_auth.pyi deleted file mode 100644 index ef8b7dc0..00000000 --- a/launch/api_client/model/callbackm_tls_auth.pyi +++ /dev/null @@ -1,140 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CallbackmTLSAuth(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "kind", - "cert", - "key", - } - - class properties: - cert = schemas.StrSchema - key = schemas.StrSchema - - class kind(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def MTLS(cls): - return cls("mtls") - __annotations__ = { - "cert": cert, - "key": key, - "kind": kind, - } - kind: MetaOapg.properties.kind - cert: MetaOapg.properties.cert - key: MetaOapg.properties.key - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cert", - "key", - "kind", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cert"]) -> MetaOapg.properties.cert: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["key"]) -> MetaOapg.properties.key: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["kind"]) -> MetaOapg.properties.kind: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cert", - "key", - "kind", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - kind: typing.Union[ - MetaOapg.properties.kind, - str, - ], - cert: typing.Union[ - MetaOapg.properties.cert, - str, - ], - key: typing.Union[ - MetaOapg.properties.key, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CallbackmTLSAuth": - return super().__new__( - cls, - *_args, - kind=kind, - cert=cert, - key=key, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cancel_fine_tune_response.pyi b/launch/api_client/model/cancel_batch_completions_v2_response.py similarity index 65% rename from launch/api_client/model/cancel_fine_tune_response.pyi rename to launch/api_client/model/cancel_batch_completions_v2_response.py index d0de8716..9f1f67ea 100644 --- a/launch/api_client/model/cancel_fine_tune_response.pyi +++ b/launch/api_client/model/cancel_batch_completions_v2_response.py @@ -19,80 +19,61 
@@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class CancelFineTuneResponse(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class CancelBatchCompletionsV2Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "success", } - + class properties: success = schemas.BoolSchema __annotations__ = { "success": success, } + success: MetaOapg.properties.success - + @typing.overload def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + success: typing.Union[MetaOapg.properties.success, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CancelFineTuneResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CancelBatchCompletionsV2Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/cancel_fine_tune_job_response.py b/launch/api_client/model/cancel_fine_tune_job_response.py index 895d2efb..f83a8251 100644 --- a/launch/api_client/model/cancel_fine_tune_job_response.py +++ b/launch/api_client/model/cancel_fine_tune_job_response.py @@ -54,7 +54,9 @@ def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: def __getitem__( self, name: typing.Union[ - typing_extensions.Literal["success",], + typing_extensions.Literal[ + "success", + ], str, ], ): @@ -72,7 +74,9 @@ def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, s def get_item_oapg( self, name: typing.Union[ - typing_extensions.Literal["success",], + typing_extensions.Literal[ + "success", + ], str, ], ): diff --git a/launch/api_client/model/cancel_fine_tune_job_response.pyi b/launch/api_client/model/cancel_fine_tune_job_response.pyi deleted file mode 100644 index d0de8716..00000000 --- 
a/launch/api_client/model/cancel_fine_tune_job_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CancelFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CancelFineTuneResponse": - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cancel_fine_tune_response.py b/launch/api_client/model/cancel_fine_tune_response.py index 895d2efb..08f8438a 100644 --- a/launch/api_client/model/cancel_fine_tune_response.py +++ b/launch/api_client/model/cancel_fine_tune_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CancelFineTuneResponse(schemas.DictSchema): +class CancelFineTuneResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "success", } - + class properties: success = schemas.BoolSchema __annotations__ = { "success": success, } - + success: MetaOapg.properties.success - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + success: typing.Union[MetaOapg.properties.success, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CancelFineTuneResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CancelFineTuneResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/upload_file_response.pyi b/launch/api_client/model/chat_completion_function_call_option.py similarity index 52% rename from launch/api_client/model/upload_file_response.pyi rename to launch/api_client/model/chat_completion_function_call_option.py index acf6d370..5e7af1b0 100644 --- 
a/launch/api_client/model/upload_file_response.pyi +++ b/launch/api_client/model/chat_completion_function_call_option.py @@ -19,86 +19,65 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class UploadFileResponse(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionFunctionCallOption( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - Response object for uploading a file. """ + class MetaOapg: required = { - "id", + "name", } - + class properties: - id = schemas.StrSchema + name = schemas.StrSchema __annotations__ = { - "id": id, + "name": name, } - id: MetaOapg.properties.id - + + name: MetaOapg.properties.name + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UploadFileResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionFunctionCallOption': return super().__new__( cls, *_args, - id=id, + name=name, _configuration=_configuration, **kwargs, ) diff --git a/launch/api_client/model/chat_completion_functions.py b/launch/api_client/model/chat_completion_functions.py new file mode 100644 index 00000000..9135c5bd --- /dev/null +++ b/launch/api_client/model/chat_completion_functions.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionFunctions( + schemas.DictSchema +): + 
"""NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "name", + } + + class properties: + name = schemas.StrSchema + + + class description( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'description': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def parameters() -> typing.Type['FunctionParameters']: + return FunctionParameters + __annotations__ = { + "name": name, + "description": description, + "parameters": parameters, + } + + name: MetaOapg.properties.name + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["parameters"]) -> 'FunctionParameters': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["parameters"]) -> typing.Union['FunctionParameters', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + description: typing.Union[MetaOapg.properties.description, None, str, schemas.Unset] = schemas.unset, + parameters: typing.Union['FunctionParameters', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionFunctions': + return super().__new__( + cls, + *_args, + name=name, + description=description, + parameters=parameters, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.function_parameters import FunctionParameters diff --git a/launch/api_client/model/chat_completion_message_tool_call.py b/launch/api_client/model/chat_completion_message_tool_call.py new file mode 100644 index 00000000..01ca895a --- /dev/null +++ b/launch/api_client/model/chat_completion_message_tool_call.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class 
ChatCompletionMessageToolCall( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "function", + "id", + "type", + } + + class properties: + id = schemas.StrSchema + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "function": "FUNCTION", + } + + @schemas.classproperty + def FUNCTION(cls): + return cls("function") + + @staticmethod + def function() -> typing.Type['Function1']: + return Function1 + __annotations__ = { + "id": id, + "type": type, + "function": function, + } + + function: 'Function1' + id: MetaOapg.properties.id + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'Function1': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "type", "function", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> 'Function1': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "type", "function", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + function: 'Function1', + id: typing.Union[MetaOapg.properties.id, str, ], + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionMessageToolCall': + return super().__new__( + cls, + *_args, + function=function, + id=id, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.function1 import Function1 diff --git a/launch/api_client/model/chat_completion_message_tool_call_chunk.py b/launch/api_client/model/chat_completion_message_tool_call_chunk.py new file mode 100644 index 00000000..86b0955d --- /dev/null +++ b/launch/api_client/model/chat_completion_message_tool_call_chunk.py @@ -0,0 +1,167 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionMessageToolCallChunk( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "index", + } + + class properties: + index = schemas.IntSchema + + + class id( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class type( + schemas.EnumBase, + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + class MetaOapg: + enum_value_to_name = { + "function": "FUNCTION", + } + + @schemas.classproperty + def FUNCTION(cls): + return cls("function") + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def function() -> typing.Type['Function2']: + return Function2 + __annotations__ = { + "index": index, + "id": id, + "type": type, + "function": function, + } + + index: MetaOapg.properties.index + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'Function2': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["index", "id", "type", "function", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> typing.Union[MetaOapg.properties.id, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> typing.Union[MetaOapg.properties.type, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> typing.Union['Function2', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["index", "id", "type", "function", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], + id: typing.Union[MetaOapg.properties.id, None, str, schemas.Unset] = schemas.unset, + type: typing.Union[MetaOapg.properties.type, None, str, schemas.Unset] = schemas.unset, + function: typing.Union['Function2', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionMessageToolCallChunk': + return super().__new__( + cls, + *_args, + index=index, + id=id, + type=type, + function=function, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.function2 import Function2 diff --git a/launch/api_client/model/chat_completion_message_tool_calls_input.py 
b/launch/api_client/model/chat_completion_message_tool_calls_input.py new file mode 100644 index 00000000..05b2219f --- /dev/null +++ b/launch/api_client/model/chat_completion_message_tool_calls_input.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionMessageToolCallsInput( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + The tool calls generated by the model, such as function calls. 
+ """ + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionMessageToolCall']: + return ChatCompletionMessageToolCall + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionMessageToolCall'], typing.List['ChatCompletionMessageToolCall']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ChatCompletionMessageToolCallsInput': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionMessageToolCall': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall, +) diff --git a/launch/api_client/model/chat_completion_message_tool_calls_output.py b/launch/api_client/model/chat_completion_message_tool_calls_output.py new file mode 100644 index 00000000..be835da5 --- /dev/null +++ b/launch/api_client/model/chat_completion_message_tool_calls_output.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionMessageToolCallsOutput( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + The tool calls generated by the model, such as function calls. 
+ """ + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionMessageToolCall']: + return ChatCompletionMessageToolCall + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionMessageToolCall'], typing.List['ChatCompletionMessageToolCall']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ChatCompletionMessageToolCallsOutput': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionMessageToolCall': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall, +) diff --git a/launch/api_client/model/chat_completion_named_tool_choice.py b/launch/api_client/model/chat_completion_named_tool_choice.py new file mode 100644 index 00000000..f2e531bc --- /dev/null +++ b/launch/api_client/model/chat_completion_named_tool_choice.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionNamedToolChoice( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "function", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "function": "FUNCTION", + } + + @schemas.classproperty + def FUNCTION(cls): + return cls("function") + + @staticmethod + def function() -> typing.Type['Function3']: + return Function3 + __annotations__ = { + "type": type, + "function": function, + } + + function: 'Function3' + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'Function3': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> 'Function3': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + function: 'Function3', + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionNamedToolChoice': + return super().__new__( + cls, + *_args, + function=function, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.function3 import Function3 diff --git a/launch/api_client/model/chat_completion_request_assistant_message.py b/launch/api_client/model/chat_completion_request_assistant_message.py new file mode 100644 index 00000000..d3b879b3 --- /dev/null +++ b/launch/api_client/model/chat_completion_request_assistant_message.py @@ -0,0 +1,249 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestAssistantMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "role", + } + + class properties: + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "assistant": "ASSISTANT", + } + + @schemas.classproperty + def ASSISTANT(cls): + return cls("assistant") + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + Content, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class refusal( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'refusal': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = 
None, + ) -> 'name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def audio() -> typing.Type['Audio']: + return Audio + + @staticmethod + def tool_calls() -> typing.Type['ChatCompletionMessageToolCallsInput']: + return ChatCompletionMessageToolCallsInput + + @staticmethod + def function_call() -> typing.Type['FunctionCall']: + return FunctionCall + __annotations__ = { + "role": role, + "content": content, + "refusal": refusal, + "name": name, + "audio": audio, + "tool_calls": tool_calls, + "function_call": function_call, + } + + role: MetaOapg.properties.role + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_calls"]) -> 'ChatCompletionMessageToolCallsInput': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> 'FunctionCall': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "name", "audio", "tool_calls", "function_call", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> typing.Union[MetaOapg.properties.refusal, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_calls"]) -> typing.Union['ChatCompletionMessageToolCallsInput', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union['FunctionCall', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "name", "audio", "tool_calls", "function_call", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + refusal: typing.Union[MetaOapg.properties.refusal, None, str, schemas.Unset] = schemas.unset, + name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, + audio: typing.Union['Audio', schemas.Unset] = schemas.unset, + tool_calls: typing.Union['ChatCompletionMessageToolCallsInput', schemas.Unset] = schemas.unset, + function_call: typing.Union['FunctionCall', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestAssistantMessage': + return super().__new__( + cls, + *_args, + role=role, + content=content, + refusal=refusal, + name=name, + audio=audio, + tool_calls=tool_calls, + function_call=function_call, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.audio import Audio +from launch.api_client.model.chat_completion_message_tool_calls_input import ( + ChatCompletionMessageToolCallsInput, +) +from launch.api_client.model.content import Content +from launch.api_client.model.function_call import FunctionCall diff --git a/launch/api_client/model/callback_auth.pyi b/launch/api_client/model/chat_completion_request_assistant_message_content_part.py similarity index 52% rename from 
launch/api_client/model/callback_auth.pyi rename to launch/api_client/model/chat_completion_request_assistant_message_content_part.py index e7193634..9b39c897 100644 --- a/launch/api_client/model/callback_auth.pyi +++ b/launch/api_client/model/chat_completion_request_assistant_message_content_part.py @@ -19,9 +19,11 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class CallbackAuth( +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestAssistantMessageContentPart( schemas.ComposedSchema, ): """NOTE: This class is auto generated by OpenAPI Generator. @@ -30,20 +32,12 @@ class CallbackAuth( Do not edit the class manually. """ + class MetaOapg: - @staticmethod - def discriminator(): - return { - "kind": { - "CallbackBasicAuth": CallbackBasicAuth, - "CallbackmTLSAuth": CallbackmTLSAuth, - "basic": CallbackBasicAuth, - "mtls": CallbackmTLSAuth, - } - } + @classmethod @functools.lru_cache() - def one_of(cls): + def any_of(cls): # we need this here to make our import statements work # we must store _composed_schemas in here so the code is only run # when we invoke this method. 
If we kept this at the class @@ -52,47 +46,17 @@ def one_of(cls): # classes don't exist yet because their module has not finished # loading return [ - CallbackBasicAuth, - CallbackmTLSAuth, + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartRefusal, ] + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CallbackAuth": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestAssistantMessageContentPart': return super().__new__( cls, *_args, @@ -100,5 +64,9 @@ def __new__( **kwargs, ) -from launch_client.model.callback_basic_auth import CallbackBasicAuth -from launch_client.model.callbackm_tls_auth import CallbackmTLSAuth +from launch.api_client.model.chat_completion_request_message_content_part_refusal import ( + ChatCompletionRequestMessageContentPartRefusal, +) +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) diff --git a/launch/api_client/model/chat_completion_request_developer_message.py b/launch/api_client/model/chat_completion_request_developer_message.py new file mode 100644 index 00000000..c672eddb --- /dev/null +++ 
b/launch/api_client/model/chat_completion_request_developer_message.py @@ -0,0 +1,178 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestDeveloperMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "role", + "content", + } + + class properties: + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + Content1, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "developer": "DEVELOPER", + } + + @schemas.classproperty + def DEVELOPER(cls): + return cls("developer") + + + class name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "content": content, + "role": role, + "name": name, + } + + role: MetaOapg.properties.role + content: MetaOapg.properties.content + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestDeveloperMessage': + return super().__new__( + cls, + *_args, + role=role, + content=content, + name=name, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.content1 import Content1 diff --git a/launch/api_client/model/chat_completion_request_function_message.py b/launch/api_client/model/chat_completion_request_function_message.py new file mode 100644 index 00000000..de175511 --- /dev/null +++ 
b/launch/api_client/model/chat_completion_request_function_message.py @@ -0,0 +1,139 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestFunctionMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "role", + "name", + } + + class properties: + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "function": "FUNCTION", + } + + @schemas.classproperty + def FUNCTION(cls): + return cls("function") + name = schemas.StrSchema + + + class content( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "role": role, + "name": name, + "content": content, + } + + role: MetaOapg.properties.role + name: MetaOapg.properties.name + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "name", "content", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "name", "content", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + content: typing.Union[MetaOapg.properties.content, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestFunctionMessage': + return super().__new__( + cls, + *_args, + role=role, + name=name, + content=content, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/chat_completion_request_message.py b/launch/api_client/model/chat_completion_request_message.py new file mode 100644 index 00000000..cb489ee6 --- /dev/null +++ 
b/launch/api_client/model/chat_completion_request_message.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestMessage( + schemas.ComposedSchema, +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + ChatCompletionRequestDeveloperMessage, + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, + ChatCompletionRequestAssistantMessage, + ChatCompletionRequestToolMessage, + ChatCompletionRequestFunctionMessage, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestMessage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_request_assistant_message import ( + ChatCompletionRequestAssistantMessage, +) +from launch.api_client.model.chat_completion_request_developer_message import ( + ChatCompletionRequestDeveloperMessage, +) +from launch.api_client.model.chat_completion_request_function_message import ( + ChatCompletionRequestFunctionMessage, +) +from launch.api_client.model.chat_completion_request_system_message import ( + ChatCompletionRequestSystemMessage, +) +from launch.api_client.model.chat_completion_request_tool_message import ( + ChatCompletionRequestToolMessage, +) +from launch.api_client.model.chat_completion_request_user_message import ( + ChatCompletionRequestUserMessage, +) diff --git a/launch/api_client/model/chat_completion_request_message_content_part_audio.py b/launch/api_client/model/chat_completion_request_message_content_part_audio.py new file mode 100644 index 00000000..30e8f44f --- 
/dev/null +++ b/launch/api_client/model/chat_completion_request_message_content_part_audio.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestMessageContentPartAudio( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "input_audio", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "input_audio": "INPUT_AUDIO", + } + + @schemas.classproperty + def INPUT_AUDIO(cls): + return cls("input_audio") + + @staticmethod + def input_audio() -> typing.Type['InputAudio']: + return InputAudio + __annotations__ = { + "type": type, + "input_audio": input_audio, + } + + input_audio: 'InputAudio' + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["input_audio"]) -> 'InputAudio': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "input_audio", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["input_audio"]) -> 'InputAudio': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "input_audio", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + input_audio: 'InputAudio', + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestMessageContentPartAudio': + return super().__new__( + cls, + *_args, + input_audio=input_audio, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.input_audio import InputAudio diff --git a/launch/api_client/model/chat_completion_request_message_content_part_file.py b/launch/api_client/model/chat_completion_request_message_content_part_file.py new file mode 100644 index 00000000..03fecbdd --- /dev/null +++ b/launch/api_client/model/chat_completion_request_message_content_part_file.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import 
re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestMessageContentPartFile( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "file", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "file": "FILE", + } + + @schemas.classproperty + def FILE(cls): + return cls("file") + + @staticmethod + def file() -> typing.Type['File']: + return File + __annotations__ = { + "type": type, + "file": file, + } + + file: 'File' + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["file"]) -> 'File': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "file", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["file"]) -> 'File': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "file", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + file: 'File', + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestMessageContentPartFile': + return super().__new__( + cls, + *_args, + file=file, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.file import File diff --git a/launch/api_client/model/chat_completion_request_message_content_part_image.py b/launch/api_client/model/chat_completion_request_message_content_part_image.py new file mode 100644 index 00000000..a3a6b355 --- /dev/null +++ b/launch/api_client/model/chat_completion_request_message_content_part_image.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestMessageContentPartImage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "image_url", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "image_url": "IMAGE_URL", + } + + @schemas.classproperty + def IMAGE_URL(cls): + return cls("image_url") + + @staticmethod + def image_url() -> typing.Type['ImageUrl']: + return ImageUrl + __annotations__ = { + "type": type, + "image_url": image_url, + } + + image_url: 'ImageUrl' + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["image_url"]) -> 'ImageUrl': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "image_url", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["image_url"]) -> 'ImageUrl': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "image_url", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + image_url: 'ImageUrl', + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestMessageContentPartImage': + return super().__new__( + cls, + *_args, + image_url=image_url, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.image_url import ImageUrl diff --git a/launch/api_client/model/chat_completion_request_message_content_part_refusal.py b/launch/api_client/model/chat_completion_request_message_content_part_refusal.py new file mode 100644 index 00000000..760f54bf --- /dev/null +++ b/launch/api_client/model/chat_completion_request_message_content_part_refusal.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestMessageContentPartRefusal( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "refusal", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "refusal": "REFUSAL", + } + + @schemas.classproperty + def REFUSAL(cls): + return cls("refusal") + refusal = schemas.StrSchema + __annotations__ = { + "type": type, + "refusal": refusal, + } + + refusal: MetaOapg.properties.refusal + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "refusal", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "refusal", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + refusal: typing.Union[MetaOapg.properties.refusal, str, ], + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestMessageContentPartRefusal': + return super().__new__( + cls, + *_args, + refusal=refusal, + type=type, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/chat_completion_request_message_content_part_text.py b/launch/api_client/model/chat_completion_request_message_content_part_text.py new file mode 100644 index 00000000..5c6d9f64 --- /dev/null +++ b/launch/api_client/model/chat_completion_request_message_content_part_text.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestMessageContentPartText( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "text", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "text": "TEXT", + } + + @schemas.classproperty + def TEXT(cls): + return cls("text") + text = schemas.StrSchema + __annotations__ = { + "type": type, + "text": text, + } + + text: MetaOapg.properties.text + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "text", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "text", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + text: typing.Union[MetaOapg.properties.text, str, ], + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestMessageContentPartText': + return super().__new__( + cls, + *_args, + text=text, + type=type, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/chat_completion_request_system_message.py b/launch/api_client/model/chat_completion_request_system_message.py new file mode 100644 index 00000000..2a05618c --- /dev/null +++ b/launch/api_client/model/chat_completion_request_system_message.py @@ -0,0 +1,178 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestSystemMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "role", + "content", + } + + class properties: + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + Content2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "system": "SYSTEM", + } + + @schemas.classproperty + def SYSTEM(cls): + return cls("system") + + + class name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "content": content, + "role": role, + "name": name, + } + + role: MetaOapg.properties.role + content: MetaOapg.properties.content + + @typing.overload + def __getitem__(self, name: 
typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestSystemMessage': + return super().__new__( + cls, + *_args, + role=role, + content=content, + name=name, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.content2 import Content2 diff --git a/launch/api_client/model/llm_source.pyi b/launch/api_client/model/chat_completion_request_system_message_content_part.py similarity index 60% rename from launch/api_client/model/llm_source.pyi rename to launch/api_client/model/chat_completion_request_system_message_content_part.py index 4df8815e..0b95016e 100644 --- a/launch/api_client/model/llm_source.pyi +++ b/launch/api_client/model/chat_completion_request_system_message_content_part.py @@ -19,17 +19,7 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class LLMSource(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech +from launch.api_client import schemas # noqa: F401 - Do not edit the class manually. - - An enumeration. 
- """ - - @schemas.classproperty - def HUGGING_FACE(cls): - return cls("hugging_face") +ChatCompletionRequestSystemMessageContentPart = schemas.Schema diff --git a/launch/api_client/model/chat_completion_request_tool_message.py b/launch/api_client/model/chat_completion_request_tool_message.py new file mode 100644 index 00000000..8dbf2896 --- /dev/null +++ b/launch/api_client/model/chat_completion_request_tool_message.py @@ -0,0 +1,161 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestToolMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "role", + "tool_call_id", + "content", + } + + class properties: + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "tool": "TOOL", + } + + @schemas.classproperty + def TOOL(cls): + return cls("tool") + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + Content3, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + tool_call_id = schemas.StrSchema + __annotations__ = { + "role": role, + "content": content, + "tool_call_id": tool_call_id, + } + + role: MetaOapg.properties.role + tool_call_id: MetaOapg.properties.tool_call_id + content: MetaOapg.properties.content + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_call_id"]) -> MetaOapg.properties.tool_call_id: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "content", "tool_call_id", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_call_id"]) -> MetaOapg.properties.tool_call_id: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "content", "tool_call_id", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + tool_call_id: typing.Union[MetaOapg.properties.tool_call_id, str, ], + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestToolMessage': + return super().__new__( + cls, + *_args, + role=role, + tool_call_id=tool_call_id, + content=content, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.content3 import Content3 diff --git a/launch/api_client/model/request_schema.pyi b/launch/api_client/model/chat_completion_request_tool_message_content_part.py similarity index 82% rename from launch/api_client/model/request_schema.pyi rename to launch/api_client/model/chat_completion_request_tool_message_content_part.py index 031787ad..64d1bcd0 100644 --- a/launch/api_client/model/request_schema.pyi +++ b/launch/api_client/model/chat_completion_request_tool_message_content_part.py @@ -19,6 +19,7 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -RequestSchema = schemas.AnyTypeSchema +from launch.api_client import schemas # noqa: F401 
+ +ChatCompletionRequestToolMessageContentPart = schemas.Schema diff --git a/launch/api_client/model/chat_completion_request_user_message.py b/launch/api_client/model/chat_completion_request_user_message.py new file mode 100644 index 00000000..8e3af6f9 --- /dev/null +++ b/launch/api_client/model/chat_completion_request_user_message.py @@ -0,0 +1,178 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestUserMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "role", + "content", + } + + class properties: + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + Content4, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "user": "USER", + } + + @schemas.classproperty + def USER(cls): + return cls("user") + + + class name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "content": content, + "role": role, + "name": name, + } + + role: MetaOapg.properties.role + content: MetaOapg.properties.content + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "role", "name", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestUserMessage': + return super().__new__( + cls, + *_args, + role=role, + content=content, + name=name, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.content4 import Content4 diff --git a/launch/api_client/model/chat_completion_request_user_message_content_part.py b/launch/api_client/model/chat_completion_request_user_message_content_part.py new file mode 100644 index 00000000..94a74888 --- /dev/null +++ 
b/launch/api_client/model/chat_completion_request_user_message_content_part.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionRequestUserMessageContentPart( + schemas.ComposedSchema, +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + ChatCompletionRequestMessageContentPartText, + ChatCompletionRequestMessageContentPartImage, + ChatCompletionRequestMessageContentPartAudio, + ChatCompletionRequestMessageContentPartFile, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionRequestUserMessageContentPart': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_request_message_content_part_audio import ( + ChatCompletionRequestMessageContentPartAudio, +) +from launch.api_client.model.chat_completion_request_message_content_part_file import ( + ChatCompletionRequestMessageContentPartFile, +) +from launch.api_client.model.chat_completion_request_message_content_part_image import ( + ChatCompletionRequestMessageContentPartImage, +) +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) diff --git a/launch/api_client/model/chat_completion_response_message.py b/launch/api_client/model/chat_completion_response_message.py new file mode 100644 index 00000000..d64ed7d3 --- /dev/null +++ b/launch/api_client/model/chat_completion_response_message.py @@ -0,0 +1,238 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionResponseMessage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "role", + } + + class properties: + + + class role( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "assistant": "ASSISTANT", + } + + @schemas.classproperty + def ASSISTANT(cls): + return cls("assistant") + + + class content( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class refusal( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'refusal': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def tool_calls() -> typing.Type['ChatCompletionMessageToolCallsOutput']: + return ChatCompletionMessageToolCallsOutput + + + class annotations( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> 
typing.Type['Annotation']: + return Annotation + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'annotations': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def function_call() -> typing.Type['FunctionCall']: + return FunctionCall + + @staticmethod + def audio() -> typing.Type['Audio1']: + return Audio1 + __annotations__ = { + "role": role, + "content": content, + "refusal": refusal, + "tool_calls": tool_calls, + "annotations": annotations, + "function_call": function_call, + "audio": audio, + } + + role: MetaOapg.properties.role + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_calls"]) -> 'ChatCompletionMessageToolCallsOutput': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["annotations"]) -> MetaOapg.properties.annotations: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> 'FunctionCall': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio1': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "tool_calls", "annotations", "function_call", "audio", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> typing.Union[MetaOapg.properties.refusal, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_calls"]) -> typing.Union['ChatCompletionMessageToolCallsOutput', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["annotations"]) -> typing.Union[MetaOapg.properties.annotations, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union['FunctionCall', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio1', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["role", "content", "refusal", "tool_calls", "annotations", "function_call", "audio", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + role: typing.Union[MetaOapg.properties.role, str, ], + content: typing.Union[MetaOapg.properties.content, None, str, schemas.Unset] = schemas.unset, + refusal: typing.Union[MetaOapg.properties.refusal, None, str, schemas.Unset] = schemas.unset, + tool_calls: typing.Union['ChatCompletionMessageToolCallsOutput', schemas.Unset] = schemas.unset, + annotations: typing.Union[MetaOapg.properties.annotations, list, tuple, None, schemas.Unset] = schemas.unset, + function_call: typing.Union['FunctionCall', schemas.Unset] = schemas.unset, + audio: typing.Union['Audio1', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionResponseMessage': + return super().__new__( + cls, + *_args, + role=role, + content=content, + refusal=refusal, + tool_calls=tool_calls, + annotations=annotations, + function_call=function_call, + audio=audio, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.annotation import Annotation +from launch.api_client.model.audio1 import Audio1 +from launch.api_client.model.chat_completion_message_tool_calls_output import ( + ChatCompletionMessageToolCallsOutput, +) +from launch.api_client.model.function_call import FunctionCall diff --git a/launch/api_client/model/chat_completion_stream_options.py b/launch/api_client/model/chat_completion_stream_options.py new file mode 100644 index 00000000..fd16c062 --- /dev/null +++ b/launch/api_client/model/chat_completion_stream_options.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +""" + 
launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionStreamOptions( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + + + class include_usage( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'include_usage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "include_usage": include_usage, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["include_usage"]) -> MetaOapg.properties.include_usage: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["include_usage", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["include_usage"]) -> typing.Union[MetaOapg.properties.include_usage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["include_usage", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + include_usage: typing.Union[MetaOapg.properties.include_usage, None, bool, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionStreamOptions': + return super().__new__( + cls, + *_args, + include_usage=include_usage, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/chat_completion_stream_response_delta.py b/launch/api_client/model/chat_completion_stream_response_delta.py new file mode 100644 index 00000000..397aca58 --- /dev/null +++ b/launch/api_client/model/chat_completion_stream_response_delta.py @@ -0,0 +1,240 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionStreamResponseDelta( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + + + class content( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def function_call() -> typing.Type['FunctionCall2']: + return FunctionCall2 + + + class tool_calls( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionMessageToolCallChunk']: + return ChatCompletionMessageToolCallChunk + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_calls': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class role( + schemas.EnumBase, + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + class MetaOapg: + enum_value_to_name = { + "developer": "DEVELOPER", + "system": "SYSTEM", + "user": "USER", + "assistant": "ASSISTANT", + "tool": "TOOL", + } + + @schemas.classproperty + def DEVELOPER(cls): + return cls("developer") + + @schemas.classproperty + def SYSTEM(cls): + return cls("system") + + @schemas.classproperty + def USER(cls): + return cls("user") + + @schemas.classproperty + def ASSISTANT(cls): + return cls("assistant") + + @schemas.classproperty + def TOOL(cls): + return cls("tool") + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'role': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class refusal( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, 
str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'refusal': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "content": content, + "function_call": function_call, + "tool_calls": tool_calls, + "role": role, + "refusal": refusal, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> 'FunctionCall2': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_calls"]) -> MetaOapg.properties.tool_calls: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["role"]) -> MetaOapg.properties.role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "function_call", "tool_calls", "role", "refusal", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union['FunctionCall2', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_calls"]) -> typing.Union[MetaOapg.properties.tool_calls, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["role"]) -> typing.Union[MetaOapg.properties.role, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> typing.Union[MetaOapg.properties.refusal, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "function_call", "tool_calls", "role", "refusal", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + content: typing.Union[MetaOapg.properties.content, None, str, schemas.Unset] = schemas.unset, + function_call: typing.Union['FunctionCall2', schemas.Unset] = schemas.unset, + tool_calls: typing.Union[MetaOapg.properties.tool_calls, list, tuple, None, schemas.Unset] = schemas.unset, + role: typing.Union[MetaOapg.properties.role, None, str, schemas.Unset] = schemas.unset, + refusal: typing.Union[MetaOapg.properties.refusal, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionStreamResponseDelta': + return super().__new__( + cls, + *_args, + content=content, + function_call=function_call, + tool_calls=tool_calls, + role=role, + refusal=refusal, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_message_tool_call_chunk import ( + ChatCompletionMessageToolCallChunk, +) +from launch.api_client.model.function_call2 import FunctionCall2 diff --git a/launch/api_client/model/chat_completion_token_logprob.py b/launch/api_client/model/chat_completion_token_logprob.py new file mode 100644 index 00000000..2217a155 --- /dev/null +++ b/launch/api_client/model/chat_completion_token_logprob.py @@ -0,0 +1,169 @@ +# coding: utf-8 + +""" + launch + + No description 
provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionTokenLogprob( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "top_logprobs", + "logprob", + "bytes", + "token", + } + + class properties: + token = schemas.StrSchema + logprob = schemas.NumberSchema + + + class bytes( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'bytes': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_logprobs( + schemas.ListSchema + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['TopLogprob']: + return TopLogprob + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['TopLogprob'], typing.List['TopLogprob']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_logprobs': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'TopLogprob': + return super().__getitem__(i) + __annotations__ = { + "token": token, + "logprob": logprob, + "bytes": bytes, + "top_logprobs": top_logprobs, + } + + top_logprobs: MetaOapg.properties.top_logprobs + 
logprob: MetaOapg.properties.logprob + bytes: MetaOapg.properties.bytes + token: MetaOapg.properties.token + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", "top_logprobs", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", "top_logprobs", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, list, tuple, ], + logprob: typing.Union[MetaOapg.properties.logprob, decimal.Decimal, int, float, ], + bytes: typing.Union[MetaOapg.properties.bytes, list, tuple, None, ], + token: typing.Union[MetaOapg.properties.token, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionTokenLogprob': + return super().__new__( + cls, + *_args, + top_logprobs=top_logprobs, + logprob=logprob, + bytes=bytes, + token=token, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.top_logprob import TopLogprob diff --git a/launch/api_client/model/chat_completion_tool.py b/launch/api_client/model/chat_completion_tool.py new file mode 100644 index 00000000..f87d91d0 --- /dev/null +++ b/launch/api_client/model/chat_completion_tool.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionTool( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "function", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "function": "FUNCTION", + } + + @schemas.classproperty + def FUNCTION(cls): + return cls("function") + + @staticmethod + def function() -> typing.Type['FunctionObject']: + return FunctionObject + __annotations__ = { + "type": type, + "function": function, + } + + function: 'FunctionObject' + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function"]) -> 'FunctionObject': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function"]) -> 'FunctionObject': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "function", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + function: 'FunctionObject', + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionTool': + return super().__new__( + cls, + *_args, + function=function, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.function_object import FunctionObject diff --git a/launch/api_client/model/chat_completion_tool_choice_option.py b/launch/api_client/model/chat_completion_tool_choice_option.py new file mode 100644 index 00000000..d39b735c --- /dev/null +++ b/launch/api_client/model/chat_completion_tool_choice_option.py @@ -0,0 +1,104 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionToolChoiceOption( + schemas.ComposedSchema, +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Controls which (if any) tool is called by the model. +`none` means the model will not call any tool and instead generates a message. 
+`auto` means the model can pick between generating a message or calling one or more tools. +`required` means the model must call one or more tools. +Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + +`none` is the default when no tools are present. `auto` is the default if tools are present. + + """ + + + class MetaOapg: + + + class any_of_0( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "none": "NONE", + "auto": "AUTO", + "required": "REQUIRED", + } + + @schemas.classproperty + def NONE(cls): + return cls("none") + + @schemas.classproperty + def AUTO(cls): + return cls("auto") + + @schemas.classproperty + def REQUIRED(cls): + return cls("required") + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + ChatCompletionNamedToolChoice, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionToolChoiceOption': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_named_tool_choice import ( + ChatCompletionNamedToolChoice, +) diff --git a/launch/api_client/model/chat_completion_v2_request.py b/launch/api_client/model/chat_completion_v2_request.py new file mode 100644 index 00000000..10542cc7 --- /dev/null +++ b/launch/api_client/model/chat_completion_v2_request.py @@ -0,0 +1,1788 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ChatCompletionV2Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "messages", + "model", + } + + class properties: + + + class messages( + schemas.ListSchema + ): + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestMessage']: + return ChatCompletionRequestMessage + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessage'], typing.List['ChatCompletionRequestMessage']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'messages': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestMessage': + return super().__getitem__(i) + model = schemas.StrSchema + + + class best_of( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'best_of': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = -1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class use_beam_search( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'use_beam_search': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class length_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'length_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class repetition_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'repetition_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class early_stopping( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'early_stopping': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stop_token_ids( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stop_token_ids': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class include_stop_str_in_output( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'include_stop_str_in_output': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ignore_eos( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ignore_eos': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class spaces_between_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'spaces_between_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class echo( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'echo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class add_generation_prompt( + schemas.BoolBase, + 
schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'add_generation_prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class continue_final_message( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'continue_final_message': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class add_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'add_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class documents( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + + class items( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'items': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] 
= None, + ) -> 'documents': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template_kwargs( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'chat_template_kwargs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_json( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'guided_json': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_regex( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_regex': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_choice( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_grammar( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_grammar': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_decoding_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_decoding_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_whitespace_pattern( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_whitespace_pattern': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class priority( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def metadata() -> typing.Type['Metadata']: + return Metadata + + + class temperature( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'temperature': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 1.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class user( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'user': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def service_tier() -> typing.Type['ServiceTier']: + return ServiceTier + + @staticmethod + def modalities() -> 
typing.Type['ResponseModalities']: + return ResponseModalities + + @staticmethod + def reasoning_effort() -> typing.Type['ReasoningEffort']: + return ReasoningEffort + + + class max_completion_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_completion_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'frequency_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def web_search_options() -> typing.Type['WebSearchOptions']: + return WebSearchOptions + + + class top_logprobs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 20 + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + 
) + + + class response_format( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + ResponseFormatText, + ResponseFormatJsonSchema, + ResponseFormatJsonObject, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'response_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def audio() -> typing.Type['Audio2']: + return Audio2 + + + class store( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'store': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stream( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stream': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stop() -> typing.Type['StopConfiguration']: + return 
StopConfiguration + + + class logit_bias( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.IntSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], + ) -> 'logit_bias': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class logprobs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class n( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 128 + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'n': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def prediction() -> 
typing.Type['PredictionContent']: + return PredictionContent + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 9223372036854775616 + inclusive_minimum = -9223372036854775616 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stream_options() -> typing.Type['ChatCompletionStreamOptions']: + return ChatCompletionStreamOptions + + + class tools( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionTool']: + return ChatCompletionTool + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tools': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def tool_choice() -> typing.Type['ChatCompletionToolChoiceOption']: + return ChatCompletionToolChoiceOption + parallel_tool_calls = schemas.BoolSchema + + + class function_call( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + + class any_of_0( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "none": "NONE", + "auto": "AUTO", + } + + @schemas.classproperty + def NONE(cls): + return cls("none") + + @schemas.classproperty + def AUTO(cls): + return cls("auto") + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + ChatCompletionFunctionCallOption, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'function_call': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class functions( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionFunctions']: + return ChatCompletionFunctions + max_items = 128 + min_items = 1 + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'functions': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "messages": messages, + "model": model, + "best_of": best_of, + "top_k": top_k, + "min_p": min_p, + "use_beam_search": use_beam_search, + "length_penalty": length_penalty, + "repetition_penalty": repetition_penalty, + "early_stopping": early_stopping, + "stop_token_ids": stop_token_ids, + "include_stop_str_in_output": include_stop_str_in_output, + "ignore_eos": ignore_eos, + "min_tokens": min_tokens, + "skip_special_tokens": skip_special_tokens, + "spaces_between_special_tokens": spaces_between_special_tokens, + "echo": echo, + "add_generation_prompt": add_generation_prompt, + "continue_final_message": 
continue_final_message, + "add_special_tokens": add_special_tokens, + "documents": documents, + "chat_template": chat_template, + "chat_template_kwargs": chat_template_kwargs, + "guided_json": guided_json, + "guided_regex": guided_regex, + "guided_choice": guided_choice, + "guided_grammar": guided_grammar, + "guided_decoding_backend": guided_decoding_backend, + "guided_whitespace_pattern": guided_whitespace_pattern, + "priority": priority, + "metadata": metadata, + "temperature": temperature, + "top_p": top_p, + "user": user, + "service_tier": service_tier, + "modalities": modalities, + "reasoning_effort": reasoning_effort, + "max_completion_tokens": max_completion_tokens, + "frequency_penalty": frequency_penalty, + "presence_penalty": presence_penalty, + "web_search_options": web_search_options, + "top_logprobs": top_logprobs, + "response_format": response_format, + "audio": audio, + "store": store, + "stream": stream, + "stop": stop, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_tokens": max_tokens, + "n": n, + "prediction": prediction, + "seed": seed, + "stream_options": stream_options, + "tools": tools, + "tool_choice": tool_choice, + "parallel_tool_calls": parallel_tool_calls, + "function_call": function_call, + "functions": functions, + } + + messages: MetaOapg.properties.messages + model: MetaOapg.properties.model + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["add_generation_prompt"]) -> MetaOapg.properties.add_generation_prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["continue_final_message"]) -> MetaOapg.properties.continue_final_message: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["documents"]) -> MetaOapg.properties.documents: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> MetaOapg.properties.chat_template_kwargs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'Metadata': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["modalities"]) -> 'ResponseModalities': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["reasoning_effort"]) -> 'ReasoningEffort': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_completion_tokens"]) -> MetaOapg.properties.max_completion_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["web_search_options"]) -> 'WebSearchOptions': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio2': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["store"]) -> MetaOapg.properties.store: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prediction"]) -> 'PredictionContent': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tools"]) -> MetaOapg.properties.tools: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_choice"]) -> 'ChatCompletionToolChoiceOption': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> MetaOapg.properties.parallel_tool_calls: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> MetaOapg.properties.function_call: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["functions"]) -> MetaOapg.properties.functions: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["messages", "model", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["add_generation_prompt"]) -> typing.Union[MetaOapg.properties.add_generation_prompt, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["continue_final_message"]) -> typing.Union[MetaOapg.properties.continue_final_message, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["documents"]) -> typing.Union[MetaOapg.properties.documents, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> typing.Union[MetaOapg.properties.chat_template_kwargs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union['Metadata', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["modalities"]) -> typing.Union['ResponseModalities', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["reasoning_effort"]) -> typing.Union['ReasoningEffort', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_completion_tokens"]) -> typing.Union[MetaOapg.properties.max_completion_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["web_search_options"]) -> typing.Union['WebSearchOptions', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> typing.Union[MetaOapg.properties.top_logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio2', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["store"]) -> typing.Union[MetaOapg.properties.store, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prediction"]) -> typing.Union['PredictionContent', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tools"]) -> typing.Union[MetaOapg.properties.tools, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_choice"]) -> typing.Union['ChatCompletionToolChoiceOption', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> typing.Union[MetaOapg.properties.parallel_tool_calls, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union[MetaOapg.properties.function_call, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["functions"]) -> typing.Union[MetaOapg.properties.functions, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["messages", "model", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + messages: typing.Union[MetaOapg.properties.messages, list, tuple, ], + model: typing.Union[MetaOapg.properties.model, str, ], + best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, + length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, 
+ early_stopping: typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, + stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, schemas.Unset] = schemas.unset, + include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, + ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, + min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, + spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, + echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, + add_generation_prompt: typing.Union[MetaOapg.properties.add_generation_prompt, None, bool, schemas.Unset] = schemas.unset, + continue_final_message: typing.Union[MetaOapg.properties.continue_final_message, None, bool, schemas.Unset] = schemas.unset, + add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, + documents: typing.Union[MetaOapg.properties.documents, list, tuple, None, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + chat_template_kwargs: typing.Union[MetaOapg.properties.chat_template_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, + guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, 
schemas.Unset] = schemas.unset, + guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, + guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, + guided_whitespace_pattern: typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, + priority: typing.Union[MetaOapg.properties.priority, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + metadata: typing.Union['Metadata', schemas.Unset] = schemas.unset, + temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, + service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, + modalities: typing.Union['ResponseModalities', schemas.Unset] = schemas.unset, + reasoning_effort: typing.Union['ReasoningEffort', schemas.Unset] = schemas.unset, + max_completion_tokens: typing.Union[MetaOapg.properties.max_completion_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + web_search_options: typing.Union['WebSearchOptions', schemas.Unset] = schemas.unset, + top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, 
io.BufferedReader, schemas.Unset] = schemas.unset, + audio: typing.Union['Audio2', schemas.Unset] = schemas.unset, + store: typing.Union[MetaOapg.properties.store, None, bool, schemas.Unset] = schemas.unset, + stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, + stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, + logit_bias: typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + logprobs: typing.Union[MetaOapg.properties.logprobs, None, bool, schemas.Unset] = schemas.unset, + max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + prediction: typing.Union['PredictionContent', schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, + tools: typing.Union[MetaOapg.properties.tools, list, tuple, None, schemas.Unset] = schemas.unset, + tool_choice: typing.Union['ChatCompletionToolChoiceOption', schemas.Unset] = schemas.unset, + parallel_tool_calls: typing.Union[MetaOapg.properties.parallel_tool_calls, bool, schemas.Unset] = schemas.unset, + function_call: typing.Union[MetaOapg.properties.function_call, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + functions: typing.Union[MetaOapg.properties.functions, list, tuple, None, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], 
+ ) -> 'ChatCompletionV2Request': + return super().__new__( + cls, + *_args, + messages=messages, + model=model, + best_of=best_of, + top_k=top_k, + min_p=min_p, + use_beam_search=use_beam_search, + length_penalty=length_penalty, + repetition_penalty=repetition_penalty, + early_stopping=early_stopping, + stop_token_ids=stop_token_ids, + include_stop_str_in_output=include_stop_str_in_output, + ignore_eos=ignore_eos, + min_tokens=min_tokens, + skip_special_tokens=skip_special_tokens, + spaces_between_special_tokens=spaces_between_special_tokens, + echo=echo, + add_generation_prompt=add_generation_prompt, + continue_final_message=continue_final_message, + add_special_tokens=add_special_tokens, + documents=documents, + chat_template=chat_template, + chat_template_kwargs=chat_template_kwargs, + guided_json=guided_json, + guided_regex=guided_regex, + guided_choice=guided_choice, + guided_grammar=guided_grammar, + guided_decoding_backend=guided_decoding_backend, + guided_whitespace_pattern=guided_whitespace_pattern, + priority=priority, + metadata=metadata, + temperature=temperature, + top_p=top_p, + user=user, + service_tier=service_tier, + modalities=modalities, + reasoning_effort=reasoning_effort, + max_completion_tokens=max_completion_tokens, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + web_search_options=web_search_options, + top_logprobs=top_logprobs, + response_format=response_format, + audio=audio, + store=store, + stream=stream, + stop=stop, + logit_bias=logit_bias, + logprobs=logprobs, + max_tokens=max_tokens, + n=n, + prediction=prediction, + seed=seed, + stream_options=stream_options, + tools=tools, + tool_choice=tool_choice, + parallel_tool_calls=parallel_tool_calls, + function_call=function_call, + functions=functions, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.audio2 import Audio2 +from launch.api_client.model.chat_completion_function_call_option import ( + 
ChatCompletionFunctionCallOption, +) +from launch.api_client.model.chat_completion_functions import ( + ChatCompletionFunctions, +) +from launch.api_client.model.chat_completion_request_message import ( + ChatCompletionRequestMessage, +) +from launch.api_client.model.chat_completion_stream_options import ( + ChatCompletionStreamOptions, +) +from launch.api_client.model.chat_completion_tool import ChatCompletionTool +from launch.api_client.model.chat_completion_tool_choice_option import ( + ChatCompletionToolChoiceOption, +) +from launch.api_client.model.metadata import Metadata +from launch.api_client.model.prediction_content import PredictionContent +from launch.api_client.model.reasoning_effort import ReasoningEffort +from launch.api_client.model.response_format_json_object import ( + ResponseFormatJsonObject, +) +from launch.api_client.model.response_format_json_schema import ( + ResponseFormatJsonSchema, +) +from launch.api_client.model.response_format_text import ResponseFormatText +from launch.api_client.model.response_modalities import ResponseModalities +from launch.api_client.model.service_tier import ServiceTier +from launch.api_client.model.stop_configuration import StopConfiguration +from launch.api_client.model.web_search_options import WebSearchOptions diff --git a/launch/api_client/model/update_batch_job_v1_request.pyi b/launch/api_client/model/chat_completion_v2_stream_error_chunk.py similarity index 51% rename from launch/api_client/model/update_batch_job_v1_request.pyi rename to launch/api_client/model/chat_completion_v2_stream_error_chunk.py index a86ddc67..7e093fe7 100644 --- a/launch/api_client/model/update_batch_job_v1_request.pyi +++ b/launch/api_client/model/chat_completion_v2_stream_error_chunk.py @@ -19,84 +19,70 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class UpdateBatchJobV1Request(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + 
+class ChatCompletionV2StreamErrorChunk( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { - "cancel", + "error", } - + class properties: - cancel = schemas.BoolSchema + + @staticmethod + def error() -> typing.Type['StreamError']: + return StreamError __annotations__ = { - "cancel": cancel, + "error": error, } - cancel: MetaOapg.properties.cancel - + + error: 'StreamError' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... + def __getitem__(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["error", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... + def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["error", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cancel: typing.Union[ - MetaOapg.properties.cancel, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + error: 'StreamError', _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateBatchJobV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ChatCompletionV2StreamErrorChunk': return super().__new__( cls, *_args, - cancel=cancel, + error=error, _configuration=_configuration, **kwargs, ) + +from launch.api_client.model.stream_error import StreamError diff --git a/launch/api_client/model/choice.py b/launch/api_client/model/choice.py new file mode 100644 index 00000000..b74ce765 --- /dev/null +++ b/launch/api_client/model/choice.py @@ -0,0 +1,165 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Choice( + schemas.DictSchema +): + """NOTE: 
This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "finish_reason", + "index", + "message", + "logprobs", + } + + class properties: + + + class finish_reason( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "stop": "STOP", + "length": "LENGTH", + "tool_calls": "TOOL_CALLS", + "content_filter": "CONTENT_FILTER", + "function_call": "FUNCTION_CALL", + } + + @schemas.classproperty + def STOP(cls): + return cls("stop") + + @schemas.classproperty + def LENGTH(cls): + return cls("length") + + @schemas.classproperty + def TOOL_CALLS(cls): + return cls("tool_calls") + + @schemas.classproperty + def CONTENT_FILTER(cls): + return cls("content_filter") + + @schemas.classproperty + def FUNCTION_CALL(cls): + return cls("function_call") + index = schemas.IntSchema + + @staticmethod + def message() -> typing.Type['ChatCompletionResponseMessage']: + return ChatCompletionResponseMessage + + @staticmethod + def logprobs() -> typing.Type['Logprobs']: + return Logprobs + __annotations__ = { + "finish_reason": finish_reason, + "index": index, + "message": message, + "logprobs": logprobs, + } + + finish_reason: MetaOapg.properties.finish_reason + index: MetaOapg.properties.index + message: 'ChatCompletionResponseMessage' + logprobs: 'Logprobs' + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["message"]) -> 'ChatCompletionResponseMessage': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "message", "logprobs", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> 'ChatCompletionResponseMessage': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "message", "logprobs", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + finish_reason: typing.Union[MetaOapg.properties.finish_reason, str, ], + index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], + message: 'ChatCompletionResponseMessage', + logprobs: 'Logprobs', + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Choice': + return super().__new__( + cls, + *_args, + finish_reason=finish_reason, + index=index, + message=message, + logprobs=logprobs, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_response_message import ( + ChatCompletionResponseMessage, +) +from launch.api_client.model.logprobs import Logprobs diff --git a/launch/api_client/model/choice1.py b/launch/api_client/model/choice1.py new file mode 100644 index 
00000000..ee27ba62 --- /dev/null +++ b/launch/api_client/model/choice1.py @@ -0,0 +1,178 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Choice1( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "finish_reason", + "delta", + "index", + } + + class properties: + + @staticmethod + def delta() -> typing.Type['ChatCompletionStreamResponseDelta']: + return ChatCompletionStreamResponseDelta + + + class finish_reason( + schemas.EnumBase, + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + class MetaOapg: + enum_value_to_name = { + "stop": "STOP", + "length": "LENGTH", + "tool_calls": "TOOL_CALLS", + "content_filter": "CONTENT_FILTER", + "function_call": "FUNCTION_CALL", + } + + @schemas.classproperty + def STOP(cls): + return cls("stop") + + @schemas.classproperty + def LENGTH(cls): + return cls("length") + + @schemas.classproperty + def TOOL_CALLS(cls): + return cls("tool_calls") + + @schemas.classproperty + def CONTENT_FILTER(cls): + return cls("content_filter") + + @schemas.classproperty + def FUNCTION_CALL(cls): + return cls("function_call") + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'finish_reason': + return super().__new__( + cls, 
+ *_args, + _configuration=_configuration, + ) + index = schemas.IntSchema + + @staticmethod + def logprobs() -> typing.Type['Logprobs']: + return Logprobs + __annotations__ = { + "delta": delta, + "finish_reason": finish_reason, + "index": index, + "logprobs": logprobs, + } + + finish_reason: MetaOapg.properties.finish_reason + delta: 'ChatCompletionStreamResponseDelta' + index: MetaOapg.properties.index + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["delta"]) -> 'ChatCompletionStreamResponseDelta': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["delta", "finish_reason", "index", "logprobs", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["delta"]) -> 'ChatCompletionStreamResponseDelta': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union['Logprobs', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["delta", "finish_reason", "index", "logprobs", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + finish_reason: typing.Union[MetaOapg.properties.finish_reason, None, str, ], + delta: 'ChatCompletionStreamResponseDelta', + index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], + logprobs: typing.Union['Logprobs', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Choice1': + return super().__new__( + cls, + *_args, + finish_reason=finish_reason, + delta=delta, + index=index, + logprobs=logprobs, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_stream_response_delta import ( + ChatCompletionStreamResponseDelta, +) +from launch.api_client.model.logprobs import Logprobs diff --git a/launch/api_client/model/choice2.py b/launch/api_client/model/choice2.py new file mode 100644 index 00000000..c5843aa9 --- /dev/null +++ b/launch/api_client/model/choice2.py @@ -0,0 +1,149 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Choice2( + schemas.DictSchema +): + """NOTE: This class is auto generated 
by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "finish_reason", + "index", + "text", + "logprobs", + } + + class properties: + + + class finish_reason( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "stop": "STOP", + "length": "LENGTH", + "content_filter": "CONTENT_FILTER", + } + + @schemas.classproperty + def STOP(cls): + return cls("stop") + + @schemas.classproperty + def LENGTH(cls): + return cls("length") + + @schemas.classproperty + def CONTENT_FILTER(cls): + return cls("content_filter") + index = schemas.IntSchema + + @staticmethod + def logprobs() -> typing.Type['Logprobs2']: + return Logprobs2 + text = schemas.StrSchema + __annotations__ = { + "finish_reason": finish_reason, + "index": index, + "logprobs": logprobs, + "text": text, + } + + finish_reason: MetaOapg.properties.finish_reason + index: MetaOapg.properties.index + text: MetaOapg.properties.text + logprobs: 'Logprobs2' + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs2': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "logprobs", "text", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["finish_reason"]) -> MetaOapg.properties.finish_reason: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["index"]) -> MetaOapg.properties.index: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> 'Logprobs2': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["finish_reason", "index", "logprobs", "text", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + finish_reason: typing.Union[MetaOapg.properties.finish_reason, str, ], + index: typing.Union[MetaOapg.properties.index, decimal.Decimal, int, ], + text: typing.Union[MetaOapg.properties.text, str, ], + logprobs: 'Logprobs2', + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Choice2': + return super().__new__( + cls, + *_args, + finish_reason=finish_reason, + index=index, + text=text, + logprobs=logprobs, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.logprobs2 import Logprobs2 diff --git a/launch/api_client/model/clone_model_bundle_v1_request.py b/launch/api_client/model/clone_model_bundle_v1_request.py index dc711b84..299cdd13 100644 --- a/launch/api_client/model/clone_model_bundle_v1_request.py +++ b/launch/api_client/model/clone_model_bundle_v1_request.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CloneModelBundleV1Request(schemas.DictSchema): +class CloneModelBundleV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -32,107 +34,89 @@ class CloneModelBundleV1Request(schemas.DictSchema): Request object for cloning a Model Bundle from another one. """ + class MetaOapg: required = { "original_model_bundle_id", } - + class properties: original_model_bundle_id = schemas.StrSchema - new_app_config = schemas.DictSchema + + + class new_app_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'new_app_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) __annotations__ = { "original_model_bundle_id": original_model_bundle_id, "new_app_config": new_app_config, } - + original_model_bundle_id: MetaOapg.properties.original_model_bundle_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["new_app_config"] - ) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["new_app_config"]) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - original_model_bundle_id: typing.Union[ - MetaOapg.properties.original_model_bundle_id, - str, - ], - new_app_config: typing.Union[ - MetaOapg.properties.new_app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + original_model_bundle_id: typing.Union[MetaOapg.properties.original_model_bundle_id, str, ], + new_app_config: typing.Union[MetaOapg.properties.new_app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CloneModelBundleV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CloneModelBundleV1Request': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/clone_model_bundle_v1_request.pyi b/launch/api_client/model/clone_model_bundle_v1_request.pyi deleted file mode 100644 index dc37a0dc..00000000 --- a/launch/api_client/model/clone_model_bundle_v1_request.pyi +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 
-from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CloneModelBundleV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for cloning a Model Bundle from another one. - """ - - class MetaOapg: - required = { - "original_model_bundle_id", - } - - class properties: - original_model_bundle_id = schemas.StrSchema - new_app_config = schemas.DictSchema - __annotations__ = { - "original_model_bundle_id": original_model_bundle_id, - "new_app_config": new_app_config, - } - original_model_bundle_id: MetaOapg.properties.original_model_bundle_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["new_app_config"] - ) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - original_model_bundle_id: typing.Union[ - MetaOapg.properties.original_model_bundle_id, - str, - ], - new_app_config: typing.Union[ - MetaOapg.properties.new_app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CloneModelBundleV1Request": - return super().__new__( - cls, - *_args, - original_model_bundle_id=original_model_bundle_id, - new_app_config=new_app_config, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/clone_model_bundle_v2_request.py b/launch/api_client/model/clone_model_bundle_v2_request.py index 2a918278..67602165 100644 --- a/launch/api_client/model/clone_model_bundle_v2_request.py +++ b/launch/api_client/model/clone_model_bundle_v2_request.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CloneModelBundleV2Request(schemas.DictSchema): +class CloneModelBundleV2Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,107 +34,89 @@ class CloneModelBundleV2Request(schemas.DictSchema): Request object for cloning a Model Bundle from another one. 
""" + class MetaOapg: required = { "original_model_bundle_id", } - + class properties: original_model_bundle_id = schemas.StrSchema - new_app_config = schemas.DictSchema + + + class new_app_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'new_app_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) __annotations__ = { "original_model_bundle_id": original_model_bundle_id, "new_app_config": new_app_config, } - + original_model_bundle_id: MetaOapg.properties.original_model_bundle_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["original_model_bundle_id"]) -> MetaOapg.properties.original_model_bundle_id: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["new_app_config"] - ) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["new_app_config"]) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["original_model_bundle_id", "new_app_config", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - original_model_bundle_id: typing.Union[ - MetaOapg.properties.original_model_bundle_id, - str, - ], - new_app_config: typing.Union[ - MetaOapg.properties.new_app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + original_model_bundle_id: typing.Union[MetaOapg.properties.original_model_bundle_id, str, ], + new_app_config: typing.Union[MetaOapg.properties.new_app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CloneModelBundleV2Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CloneModelBundleV2Request': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/clone_model_bundle_v2_request.pyi b/launch/api_client/model/clone_model_bundle_v2_request.pyi deleted file mode 100644 index 82891ae5..00000000 --- a/launch/api_client/model/clone_model_bundle_v2_request.pyi +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 
-from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CloneModelBundleV2Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for cloning a Model Bundle from another one. - """ - - class MetaOapg: - required = { - "original_model_bundle_id", - } - - class properties: - original_model_bundle_id = schemas.StrSchema - new_app_config = schemas.DictSchema - __annotations__ = { - "original_model_bundle_id": original_model_bundle_id, - "new_app_config": new_app_config, - } - original_model_bundle_id: MetaOapg.properties.original_model_bundle_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["new_app_config"]) -> MetaOapg.properties.new_app_config: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["original_model_bundle_id"] - ) -> MetaOapg.properties.original_model_bundle_id: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["new_app_config"] - ) -> typing.Union[MetaOapg.properties.new_app_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "original_model_bundle_id", - "new_app_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - original_model_bundle_id: typing.Union[ - MetaOapg.properties.original_model_bundle_id, - str, - ], - new_app_config: typing.Union[ - MetaOapg.properties.new_app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CloneModelBundleV2Request": - return super().__new__( - cls, - *_args, - original_model_bundle_id=original_model_bundle_id, - new_app_config=new_app_config, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/cloudpickle_artifact_flavor.py b/launch/api_client/model/cloudpickle_artifact_flavor.py index c0d3366d..669356c3 100644 --- a/launch/api_client/model/cloudpickle_artifact_flavor.py +++ b/launch/api_client/model/cloudpickle_artifact_flavor.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CloudpickleArtifactFlavor(schemas.DictSchema): +class CloudpickleArtifactFlavor( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class CloudpickleArtifactFlavor(schemas.DictSchema): This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact. 
""" + class MetaOapg: required = { "flavor", @@ -41,22 +44,40 @@ class MetaOapg: "location", "load_predict_fn", } - + class properties: - class flavor(schemas.EnumBase, schemas.StrSchema): + + + class requirements( + schemas.ListSchema + ): + + class MetaOapg: - enum_value_to_name = { - "cloudpickle_artifact": "CLOUDPICKLE_ARTIFACT", - } - - @schemas.classproperty - def CLOUDPICKLE_ARTIFACT(cls): - return cls("cloudpickle_artifact") - + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'requirements': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + class framework( schemas.ComposedSchema, ): + + class MetaOapg: + @classmethod @functools.lru_cache() def one_of(cls): @@ -72,273 +93,159 @@ def one_of(cls): TensorflowFramework, CustomFramework, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "framework": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'framework': return super().__new__( cls, *_args, 
_configuration=_configuration, **kwargs, ) - - load_model_fn = schemas.StrSchema - load_predict_fn = schemas.StrSchema location = schemas.StrSchema - - class requirements(schemas.ListSchema): + + + class flavor( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: - items = schemas.StrSchema - + enum_value_to_name = { + "cloudpickle_artifact": "CLOUDPICKLE_ARTIFACT", + } + + @schemas.classproperty + def CLOUDPICKLE_ARTIFACT(cls): + return cls("cloudpickle_artifact") + load_predict_fn = schemas.StrSchema + load_model_fn = schemas.StrSchema + + + class app_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'app_config': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, + **kwargs, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - app_config = schemas.DictSchema __annotations__ = { - "flavor": flavor, + "requirements": requirements, "framework": framework, - "load_model_fn": load_model_fn, - "load_predict_fn": load_predict_fn, "location": location, - 
"requirements": requirements, + "flavor": flavor, + "load_predict_fn": load_predict_fn, + "load_model_fn": load_model_fn, "app_config": app_config, } - + flavor: MetaOapg.properties.flavor requirements: MetaOapg.properties.requirements framework: MetaOapg.properties.framework load_model_fn: MetaOapg.properties.load_model_fn location: MetaOapg.properties.location load_predict_fn: MetaOapg.properties.load_predict_fn - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: - ... - + def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: - ... - + def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_predict_fn"]) -> MetaOapg.properties.load_predict_fn: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def __getitem__(self, name: typing_extensions.Literal["load_predict_fn"]) -> MetaOapg.properties.load_predict_fn: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def __getitem__(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn", - "load_predict_fn", - "location", - "requirements", - "app_config", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn", "load_model_fn", "app_config", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_predict_fn"]) -> MetaOapg.properties.load_predict_fn: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["load_predict_fn"]) -> MetaOapg.properties.load_predict_fn: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn", - "load_predict_fn", - "location", - "requirements", - "app_config", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn", "load_model_fn", "app_config", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - framework: typing.Union[ - MetaOapg.properties.framework, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - load_model_fn: typing.Union[ - MetaOapg.properties.load_model_fn, - str, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - load_predict_fn: typing.Union[ - MetaOapg.properties.load_predict_fn, - str, - ], - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, str, ], + requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], + framework: typing.Union[MetaOapg.properties.framework, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + load_model_fn: typing.Union[MetaOapg.properties.load_model_fn, str, ], + location: typing.Union[MetaOapg.properties.location, str, ], + load_predict_fn: typing.Union[MetaOapg.properties.load_predict_fn, str, ], + app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - 
int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CloudpickleArtifactFlavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CloudpickleArtifactFlavor': return super().__new__( cls, *_args, @@ -353,7 +260,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.custom_framework import CustomFramework from launch.api_client.model.pytorch_framework import PytorchFramework from launch.api_client.model.tensorflow_framework import TensorflowFramework diff --git a/launch/api_client/model/cloudpickle_artifact_flavor.pyi b/launch/api_client/model/cloudpickle_artifact_flavor.pyi deleted file mode 100644 index 021e1b61..00000000 --- a/launch/api_client/model/cloudpickle_artifact_flavor.pyi +++ /dev/null @@ -1,315 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CloudpickleArtifactFlavor(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact. 
- """ - - class MetaOapg: - required = { - "flavor", - "requirements", - "framework", - "load_model_fn", - "location", - "load_predict_fn", - } - - class properties: - class flavor(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def CLOUDPICKLE_ARTIFACT(cls): - return cls("cloudpickle_artifact") - - class framework( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - PytorchFramework, - TensorflowFramework, - CustomFramework, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "framework": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - load_model_fn = schemas.StrSchema - load_predict_fn = schemas.StrSchema - location = schemas.StrSchema - - class requirements(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": - return 
super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - app_config = schemas.DictSchema - __annotations__ = { - "flavor": flavor, - "framework": framework, - "load_model_fn": load_model_fn, - "load_predict_fn": load_predict_fn, - "location": location, - "requirements": requirements, - "app_config": app_config, - } - flavor: MetaOapg.properties.flavor - requirements: MetaOapg.properties.requirements - framework: MetaOapg.properties.framework - load_model_fn: MetaOapg.properties.load_model_fn - location: MetaOapg.properties.location - load_predict_fn: MetaOapg.properties.load_predict_fn - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["load_predict_fn"] - ) -> MetaOapg.properties.load_predict_fn: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn", - "load_predict_fn", - "location", - "requirements", - "app_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["load_model_fn"]) -> MetaOapg.properties.load_model_fn: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["load_predict_fn"] - ) -> MetaOapg.properties.load_predict_fn: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn", - "load_predict_fn", - "location", - "requirements", - "app_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - framework: typing.Union[ - MetaOapg.properties.framework, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - load_model_fn: typing.Union[ - MetaOapg.properties.load_model_fn, - str, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - load_predict_fn: typing.Union[ - MetaOapg.properties.load_predict_fn, - str, - ], - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CloudpickleArtifactFlavor": - return super().__new__( - cls, - *_args, - flavor=flavor, - requirements=requirements, - framework=framework, - load_model_fn=load_model_fn, - location=location, - load_predict_fn=load_predict_fn, - app_config=app_config, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.custom_framework import CustomFramework -from launch_client.model.pytorch_framework import PytorchFramework -from launch_client.model.tensorflow_framework import TensorflowFramework diff --git a/launch/api_client/model/completion_output.py b/launch/api_client/model/completion_output.py index 
b7ea6ab7..ff1934de 100644 --- a/launch/api_client/model/completion_output.py +++ b/launch/api_client/model/completion_output.py @@ -23,184 +23,142 @@ from launch.api_client import schemas # noqa: F401 -class CompletionOutput(schemas.DictSchema): +class CompletionOutput( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. + + Represents the output of a completion request to a model. """ + class MetaOapg: required = { - "num_prompt_tokens", "num_completion_tokens", "text", } - + class properties: - num_completion_tokens = schemas.IntSchema - num_prompt_tokens = schemas.IntSchema text = schemas.StrSchema - - class tokens(schemas.ListSchema): + num_completion_tokens = schemas.IntSchema + + + class num_prompt_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_prompt_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokens( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["TokenOutput"]: + def items() -> typing.Type['TokenOutput']: return TokenOutput - + + def __new__( cls, - _arg: typing.Union[typing.Tuple["TokenOutput"], typing.List["TokenOutput"]], + *_args: typing.Union[list, tuple, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "tokens": + ) -> 'tokens': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "TokenOutput": - return super().__getitem__(i) - __annotations__ = { + "text": text, "num_completion_tokens": num_completion_tokens, "num_prompt_tokens": num_prompt_tokens, - "text": text, "tokens": tokens, } - - 
num_prompt_tokens: MetaOapg.properties.num_prompt_tokens + num_completion_tokens: MetaOapg.properties.num_completion_tokens text: MetaOapg.properties.text - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> MetaOapg.properties.num_completion_tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> MetaOapg.properties.num_prompt_tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_completion_tokens"]) -> MetaOapg.properties.num_completion_tokens: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> MetaOapg.properties.num_prompt_tokens: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokens"]) -> MetaOapg.properties.tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["tokens"]) -> MetaOapg.properties.tokens: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "num_completion_tokens", - "num_prompt_tokens", - "text", - "tokens", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["text", "num_completion_tokens", "num_prompt_tokens", "tokens", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> MetaOapg.properties.num_completion_tokens: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> MetaOapg.properties.num_prompt_tokens: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_completion_tokens"]) -> MetaOapg.properties.num_completion_tokens: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> typing.Union[MetaOapg.properties.num_prompt_tokens, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tokens"] - ) -> typing.Union[MetaOapg.properties.tokens, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["tokens"]) -> typing.Union[MetaOapg.properties.tokens, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "num_completion_tokens", - "num_prompt_tokens", - "text", - "tokens", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["text", "num_completion_tokens", "num_prompt_tokens", "tokens", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - num_prompt_tokens: typing.Union[ - MetaOapg.properties.num_prompt_tokens, - decimal.Decimal, - int, - ], - num_completion_tokens: typing.Union[ - MetaOapg.properties.num_completion_tokens, - decimal.Decimal, - int, - ], - text: typing.Union[ - MetaOapg.properties.text, - str, - ], - tokens: typing.Union[MetaOapg.properties.tokens, list, tuple, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + num_completion_tokens: typing.Union[MetaOapg.properties.num_completion_tokens, decimal.Decimal, int, ], + text: typing.Union[MetaOapg.properties.text, str, ], + num_prompt_tokens: typing.Union[MetaOapg.properties.num_prompt_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tokens: typing.Union[MetaOapg.properties.tokens, list, tuple, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionOutput": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionOutput': return super().__new__( cls, *_args, - num_prompt_tokens=num_prompt_tokens, num_completion_tokens=num_completion_tokens, text=text, + num_prompt_tokens=num_prompt_tokens, tokens=tokens, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.token_output import TokenOutput diff --git a/launch/api_client/model/completion_output.pyi b/launch/api_client/model/completion_output.pyi deleted file mode 100644 
index 74acfcc0..00000000 --- a/launch/api_client/model/completion_output.pyi +++ /dev/null @@ -1,177 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CompletionOutput(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "num_prompt_tokens", - "num_completion_tokens", - "text", - } - - class properties: - num_completion_tokens = schemas.IntSchema - num_prompt_tokens = schemas.IntSchema - text = schemas.StrSchema - - class tokens(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["TokenOutput"]: - return TokenOutput - def __new__( - cls, - _arg: typing.Union[typing.Tuple["TokenOutput"], typing.List["TokenOutput"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "tokens": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "TokenOutput": - return super().__getitem__(i) - __annotations__ = { - "num_completion_tokens": num_completion_tokens, - "num_prompt_tokens": num_prompt_tokens, - "text": text, - "tokens": tokens, - } - num_prompt_tokens: MetaOapg.properties.num_prompt_tokens - num_completion_tokens: MetaOapg.properties.num_completion_tokens - text: MetaOapg.properties.text - - @typing.overload - def __getitem__( - self, name: 
typing_extensions.Literal["num_completion_tokens"] - ) -> MetaOapg.properties.num_completion_tokens: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> MetaOapg.properties.num_prompt_tokens: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tokens"]) -> MetaOapg.properties.tokens: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "num_completion_tokens", - "num_prompt_tokens", - "text", - "tokens", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> MetaOapg.properties.num_completion_tokens: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> MetaOapg.properties.num_prompt_tokens: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tokens"] - ) -> typing.Union[MetaOapg.properties.tokens, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "num_completion_tokens", - "num_prompt_tokens", - "text", - "tokens", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - num_prompt_tokens: typing.Union[ - MetaOapg.properties.num_prompt_tokens, - decimal.Decimal, - int, - ], - num_completion_tokens: typing.Union[ - MetaOapg.properties.num_completion_tokens, - decimal.Decimal, - int, - ], - text: typing.Union[ - MetaOapg.properties.text, - str, - ], - tokens: typing.Union[MetaOapg.properties.tokens, list, tuple, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionOutput": - return super().__new__( - cls, - *_args, - num_prompt_tokens=num_prompt_tokens, - num_completion_tokens=num_completion_tokens, - text=text, - tokens=tokens, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.token_output import TokenOutput diff --git a/launch/api_client/model/completion_stream_output.py b/launch/api_client/model/completion_stream_output.py index 05149201..2c673495 100644 --- a/launch/api_client/model/completion_stream_output.py +++ b/launch/api_client/model/completion_stream_output.py @@ -23,177 +23,146 @@ from launch.api_client import schemas # noqa: F401 -class CompletionStreamOutput(schemas.DictSchema): +class CompletionStreamOutput( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "finished", "text", } - + class properties: - finished = schemas.BoolSchema text = schemas.StrSchema - num_completion_tokens = schemas.IntSchema - num_prompt_tokens = schemas.IntSchema - + finished = schemas.BoolSchema + + + class num_prompt_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_prompt_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_completion_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_completion_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def token() -> typing.Type["TokenOutput"]: + def token() -> typing.Type['TokenOutput']: return TokenOutput - __annotations__ = { - "finished": finished, "text": text, - "num_completion_tokens": num_completion_tokens, + "finished": finished, "num_prompt_tokens": num_prompt_tokens, + "num_completion_tokens": num_completion_tokens, "token": token, } - + finished: MetaOapg.properties.finished text: MetaOapg.properties.text - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: - ... - + def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: - ... - + def __getitem__(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: ... 
+ @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> MetaOapg.properties.num_completion_tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> MetaOapg.properties.num_prompt_tokens: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> MetaOapg.properties.num_prompt_tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_completion_tokens"]) -> MetaOapg.properties.num_completion_tokens: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> "TokenOutput": - ... - + def __getitem__(self, name: typing_extensions.Literal["token"]) -> 'TokenOutput': ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "finished", - "text", - "num_completion_tokens", - "num_prompt_tokens", - "token", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["text", "finished", "num_prompt_tokens", "num_completion_tokens", "token", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> typing.Union[MetaOapg.properties.num_completion_tokens, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["num_prompt_tokens"]) -> typing.Union[MetaOapg.properties.num_prompt_tokens, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> typing.Union[MetaOapg.properties.num_prompt_tokens, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_completion_tokens"]) -> typing.Union[MetaOapg.properties.num_completion_tokens, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> typing.Union["TokenOutput", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> typing.Union['TokenOutput', schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "finished", - "text", - "num_completion_tokens", - "num_prompt_tokens", - "token", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["text", "finished", "num_prompt_tokens", "num_completion_tokens", "token", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - finished: typing.Union[ - MetaOapg.properties.finished, - bool, - ], - text: typing.Union[ - MetaOapg.properties.text, - str, - ], - num_completion_tokens: typing.Union[ - MetaOapg.properties.num_completion_tokens, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - num_prompt_tokens: typing.Union[ - MetaOapg.properties.num_prompt_tokens, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - token: typing.Union["TokenOutput", schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + finished: typing.Union[MetaOapg.properties.finished, bool, ], + text: typing.Union[MetaOapg.properties.text, str, ], + num_prompt_tokens: typing.Union[MetaOapg.properties.num_prompt_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + num_completion_tokens: typing.Union[MetaOapg.properties.num_completion_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + token: typing.Union['TokenOutput', schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionStreamOutput": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionStreamOutput': return super().__new__( cls, *_args, finished=finished, text=text, - num_completion_tokens=num_completion_tokens, num_prompt_tokens=num_prompt_tokens, + num_completion_tokens=num_completion_tokens, token=token, 
_configuration=_configuration, **kwargs, ) - from launch.api_client.model.token_output import TokenOutput diff --git a/launch/api_client/model/completion_stream_output.pyi b/launch/api_client/model/completion_stream_output.pyi deleted file mode 100644 index bd191e7c..00000000 --- a/launch/api_client/model/completion_stream_output.pyi +++ /dev/null @@ -1,168 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CompletionStreamOutput(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "finished", - "text", - } - - class properties: - finished = schemas.BoolSchema - text = schemas.StrSchema - num_completion_tokens = schemas.IntSchema - num_prompt_tokens = schemas.IntSchema - - @staticmethod - def token() -> typing.Type["TokenOutput"]: - return TokenOutput - __annotations__ = { - "finished": finished, - "text": text, - "num_completion_tokens": num_completion_tokens, - "num_prompt_tokens": num_prompt_tokens, - "token": token, - } - finished: MetaOapg.properties.finished - text: MetaOapg.properties.text - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... 
- @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> MetaOapg.properties.num_completion_tokens: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> MetaOapg.properties.num_prompt_tokens: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> "TokenOutput": ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "finished", - "text", - "num_completion_tokens", - "num_prompt_tokens", - "token", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["finished"]) -> MetaOapg.properties.finished: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["text"]) -> MetaOapg.properties.text: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_completion_tokens"] - ) -> typing.Union[MetaOapg.properties.num_completion_tokens, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_prompt_tokens"] - ) -> typing.Union[MetaOapg.properties.num_prompt_tokens, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> typing.Union["TokenOutput", schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "finished", - "text", - "num_completion_tokens", - "num_prompt_tokens", - "token", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - finished: typing.Union[ - MetaOapg.properties.finished, - bool, - ], - text: typing.Union[ - MetaOapg.properties.text, - str, - ], - num_completion_tokens: typing.Union[ - MetaOapg.properties.num_completion_tokens, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - num_prompt_tokens: typing.Union[ - MetaOapg.properties.num_prompt_tokens, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - token: typing.Union["TokenOutput", schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionStreamOutput": - return super().__new__( - cls, - *_args, - finished=finished, - text=text, - num_completion_tokens=num_completion_tokens, - num_prompt_tokens=num_prompt_tokens, - token=token, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.token_output import TokenOutput diff --git a/launch/api_client/model/completion_stream_v1_request.py b/launch/api_client/model/completion_stream_v1_request.py index f7910724..8fb5fe58 100644 --- a/launch/api_client/model/completion_stream_v1_request.py +++ b/launch/api_client/model/completion_stream_v1_request.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CompletionStreamV1Request(schemas.DictSchema): +class CompletionStreamV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -32,404 +34,472 @@ class CompletionStreamV1Request(schemas.DictSchema): Request object for a stream prompt completion task. """ + class MetaOapg: required = { "max_new_tokens", "temperature", "prompt", } - + class properties: - max_new_tokens = schemas.IntSchema prompt = schemas.StrSchema - - class temperature(schemas.NumberSchema): + max_new_tokens = schemas.IntSchema + + + class temperature( + schemas.NumberSchema + ): + + class MetaOapg: inclusive_maximum = 1.0 inclusive_minimum = 0.0 - - class frequency_penalty(schemas.NumberSchema): - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - class guided_choice(schemas.ListSchema): + + + class stop_sequences( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + class MetaOapg: items = schemas.StrSchema - + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[list, tuple, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "guided_choice": + ) -> 'stop_sequences': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - guided_grammar = schemas.StrSchema - guided_json = schemas.DictSchema - guided_regex = schemas.StrSchema - include_stop_str_in_output = schemas.BoolSchema - - class presence_penalty(schemas.NumberSchema): + + + class return_token_log_probs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'return_token_log_probs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + 
schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_maximum = 2.0 inclusive_minimum = 0.0 - - return_token_log_probs = schemas.BoolSchema - - class stop_sequences(schemas.ListSchema): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: - items = schemas.StrSchema - + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[None, decimal.Decimal, int, float, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "stop_sequences": + ) -> 'frequency_penalty': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class top_k(schemas.IntSchema): + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_minimum = -1 - - class top_p(schemas.NumberSchema): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_maximum = 1.0 - + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class include_stop_str_in_output( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'include_stop_str_in_output': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_json( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'guided_json': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_regex( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_regex': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_choice( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + 
*_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_grammar( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_grammar': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "max_new_tokens": max_new_tokens, "prompt": prompt, + "max_new_tokens": max_new_tokens, "temperature": temperature, - "frequency_penalty": frequency_penalty, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_json": guided_json, - "guided_regex": guided_regex, - "include_stop_str_in_output": include_stop_str_in_output, - "presence_penalty": presence_penalty, - "return_token_log_probs": return_token_log_probs, "stop_sequences": stop_sequences, + "return_token_log_probs": return_token_log_probs, + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, "top_k": top_k, "top_p": top_p, + "include_stop_str_in_output": include_stop_str_in_output, + "guided_json": guided_json, + "guided_regex": guided_regex, + "guided_choice": guided_choice, + "guided_grammar": guided_grammar, + "skip_special_tokens": skip_special_tokens, } - + max_new_tokens: MetaOapg.properties.max_new_tokens temperature: MetaOapg.properties.temperature prompt: MetaOapg.properties.prompt - + @typing.overload - def 
__getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: - ... - + def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: - ... - + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> MetaOapg.properties.frequency_penalty: - ... - + def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: - ... - + def __getitem__(self, name: typing_extensions.Literal["return_token_log_probs"]) -> MetaOapg.properties.return_token_log_probs: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: - ... - + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: - ... - + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> MetaOapg.properties.include_stop_str_in_output: - ... - + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: - ... - + def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> MetaOapg.properties.return_token_log_probs: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["stop_sequences"]) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_choice"] - ) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["return_token_log_probs"]) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_grammar"] - ) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_json"] - ) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_regex"] - ) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["stop_sequences"] - ) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_k"] - ) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_p"] - ) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_new_tokens: typing.Union[ - MetaOapg.properties.max_new_tokens, - decimal.Decimal, - int, - ], - temperature: typing.Union[ - MetaOapg.properties.temperature, - decimal.Decimal, - int, - float, - ], - prompt: typing.Union[ - MetaOapg.properties.prompt, - str, - ], - frequency_penalty: typing.Union[ - MetaOapg.properties.frequency_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, str, schemas.Unset] = schemas.unset, - guided_json: typing.Union[ - MetaOapg.properties.guided_json, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, str, schemas.Unset] = schemas.unset, - 
include_stop_str_in_output: typing.Union[ - MetaOapg.properties.include_stop_str_in_output, bool, schemas.Unset - ] = schemas.unset, - presence_penalty: typing.Union[ - MetaOapg.properties.presence_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - return_token_log_probs: typing.Union[ - MetaOapg.properties.return_token_log_probs, bool, schemas.Unset - ] = schemas.unset, - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + max_new_tokens: typing.Union[MetaOapg.properties.max_new_tokens, decimal.Decimal, int, ], + temperature: typing.Union[MetaOapg.properties.temperature, decimal.Decimal, int, float, ], + prompt: typing.Union[MetaOapg.properties.prompt, str, ], + stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, None, schemas.Unset] = schemas.unset, + return_token_log_probs: typing.Union[MetaOapg.properties.return_token_log_probs, None, bool, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, + guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, 
None, schemas.Unset] = schemas.unset, + guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, + guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, + guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionStreamV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionStreamV1Request': return super().__new__( cls, *_args, max_new_tokens=max_new_tokens, temperature=temperature, prompt=prompt, - frequency_penalty=frequency_penalty, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_json=guided_json, - guided_regex=guided_regex, - include_stop_str_in_output=include_stop_str_in_output, - presence_penalty=presence_penalty, - return_token_log_probs=return_token_log_probs, stop_sequences=stop_sequences, + return_token_log_probs=return_token_log_probs, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, top_k=top_k, top_p=top_p, + include_stop_str_in_output=include_stop_str_in_output, + guided_json=guided_json, + guided_regex=guided_regex, + guided_choice=guided_choice, + guided_grammar=guided_grammar, + skip_special_tokens=skip_special_tokens, _configuration=_configuration, **kwargs, ) diff --git a/launch/api_client/model/completion_stream_v1_request.pyi b/launch/api_client/model/completion_stream_v1_request.pyi deleted file mode 100644 
index f4312c83..00000000 --- a/launch/api_client/model/completion_stream_v1_request.pyi +++ /dev/null @@ -1,359 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CompletionStreamV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for a stream prompt completion task. - """ - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompt", - } - - class properties: - max_new_tokens = schemas.IntSchema - prompt = schemas.StrSchema - - class temperature(schemas.NumberSchema): - pass - - class frequency_penalty(schemas.NumberSchema): - pass - - class guided_choice(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "guided_choice": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - guided_grammar = schemas.StrSchema - guided_json = schemas.DictSchema - guided_regex = schemas.StrSchema - include_stop_str_in_output = schemas.BoolSchema - - class presence_penalty(schemas.NumberSchema): - pass - 
return_token_log_probs = schemas.BoolSchema - - class stop_sequences(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "stop_sequences": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class top_k(schemas.IntSchema): - pass - - class top_p(schemas.NumberSchema): - pass - __annotations__ = { - "max_new_tokens": max_new_tokens, - "prompt": prompt, - "temperature": temperature, - "frequency_penalty": frequency_penalty, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_json": guided_json, - "guided_regex": guided_regex, - "include_stop_str_in_output": include_stop_str_in_output, - "presence_penalty": presence_penalty, - "return_token_log_probs": return_token_log_probs, - "stop_sequences": stop_sequences, - "top_k": top_k, - "top_p": top_p, - } - max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompt: MetaOapg.properties.prompt - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> MetaOapg.properties.frequency_penalty: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> MetaOapg.properties.include_stop_str_in_output: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> MetaOapg.properties.presence_penalty: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> MetaOapg.properties.return_token_log_probs: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_new_tokens"] - ) -> MetaOapg.properties.max_new_tokens: ... 
- @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_choice"] - ) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_grammar"] - ) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_json"] - ) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_regex"] - ) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["stop_sequences"] - ) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_k"] - ) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_p"] - ) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_new_tokens: typing.Union[ - MetaOapg.properties.max_new_tokens, - decimal.Decimal, - int, - ], - temperature: typing.Union[ - MetaOapg.properties.temperature, - decimal.Decimal, - int, - float, - ], - prompt: typing.Union[ - MetaOapg.properties.prompt, - str, - ], - frequency_penalty: typing.Union[ - MetaOapg.properties.frequency_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, str, schemas.Unset] = schemas.unset, - guided_json: typing.Union[ - MetaOapg.properties.guided_json, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, str, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[ - MetaOapg.properties.include_stop_str_in_output, bool, schemas.Unset - ] = schemas.unset, - presence_penalty: typing.Union[ - MetaOapg.properties.presence_penalty, 
decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - return_token_log_probs: typing.Union[ - MetaOapg.properties.return_token_log_probs, bool, schemas.Unset - ] = schemas.unset, - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionStreamV1Request": - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - prompt=prompt, - frequency_penalty=frequency_penalty, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_json=guided_json, - guided_regex=guided_regex, - include_stop_str_in_output=include_stop_str_in_output, - presence_penalty=presence_penalty, - return_token_log_probs=return_token_log_probs, - stop_sequences=stop_sequences, - top_k=top_k, - top_p=top_p, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/completion_stream_v1_response.py b/launch/api_client/model/completion_stream_v1_response.py index 0258e42c..3662050e 100644 --- a/launch/api_client/model/completion_stream_v1_response.py +++ b/launch/api_client/model/completion_stream_v1_response.py @@ -23,133 +23,112 @@ from launch.api_client import schemas # noqa: F401 -class CompletionStreamV1Response(schemas.DictSchema): +class CompletionStreamV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
- Response object for a stream prompt completion task. + Response object for a stream prompt completion task. """ + class MetaOapg: + required = { + "request_id", + } + class properties: + + + class request_id( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'request_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def error() -> typing.Type["StreamError"]: - return StreamError - - @staticmethod - def output() -> typing.Type["CompletionStreamOutput"]: + def output() -> typing.Type['CompletionStreamOutput']: return CompletionStreamOutput - - request_id = schemas.StrSchema + + @staticmethod + def error() -> typing.Type['StreamError']: + return StreamError __annotations__ = { - "error": error, - "output": output, "request_id": request_id, + "output": output, + "error": error, } - + + request_id: MetaOapg.properties.request_id + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> "StreamError": - ... - + def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output"]) -> "CompletionStreamOutput": - ... - + def __getitem__(self, name: typing_extensions.Literal["output"]) -> 'CompletionStreamOutput': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "output", - "request_id", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["request_id", "output", "error", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> typing.Union["StreamError", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["output"] - ) -> typing.Union["CompletionStreamOutput", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["output"]) -> typing.Union['CompletionStreamOutput', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["request_id"] - ) -> typing.Union[MetaOapg.properties.request_id, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> typing.Union['StreamError', schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "output", - "request_id", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["request_id", "output", "error", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - error: typing.Union["StreamError", schemas.Unset] = schemas.unset, - output: typing.Union["CompletionStreamOutput", schemas.Unset] = schemas.unset, - request_id: typing.Union[MetaOapg.properties.request_id, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + request_id: typing.Union[MetaOapg.properties.request_id, None, str, ], + output: typing.Union['CompletionStreamOutput', schemas.Unset] = schemas.unset, + error: typing.Union['StreamError', schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionStreamV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionStreamV1Response': return super().__new__( cls, *_args, - error=error, - output=output, request_id=request_id, + output=output, + error=error, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.completion_stream_output import ( CompletionStreamOutput, ) diff --git a/launch/api_client/model/completion_stream_v1_response.pyi b/launch/api_client/model/completion_stream_v1_response.pyi deleted file mode 100644 index 20c11e87..00000000 --- a/launch/api_client/model/completion_stream_v1_response.pyi +++ /dev/null @@ -1,130 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - 
Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CompletionStreamV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a stream prompt completion task. - """ - - class MetaOapg: - class properties: - @staticmethod - def error() -> typing.Type["StreamError"]: - return StreamError - @staticmethod - def output() -> typing.Type["CompletionStreamOutput"]: - return CompletionStreamOutput - request_id = schemas.StrSchema - __annotations__ = { - "error": error, - "output": output, - "request_id": request_id, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> "StreamError": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output"]) -> "CompletionStreamOutput": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "output", - "request_id", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> typing.Union["StreamError", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["output"] - ) -> typing.Union["CompletionStreamOutput", schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["request_id"] - ) -> typing.Union[MetaOapg.properties.request_id, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "output", - "request_id", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - error: typing.Union["StreamError", schemas.Unset] = schemas.unset, - output: typing.Union["CompletionStreamOutput", schemas.Unset] = schemas.unset, - request_id: typing.Union[MetaOapg.properties.request_id, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionStreamV1Response": - return super().__new__( - cls, - *_args, - error=error, - output=output, - request_id=request_id, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.completion_stream_output import CompletionStreamOutput -from launch_client.model.stream_error import StreamError diff --git a/launch/api_client/model/completion_sync_v1_request.py b/launch/api_client/model/completion_sync_v1_request.py index c303d69c..f0e775aa 100644 --- a/launch/api_client/model/completion_sync_v1_request.py +++ b/launch/api_client/model/completion_sync_v1_request.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CompletionSyncV1Request(schemas.DictSchema): +class CompletionSyncV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -32,404 +34,472 @@ class CompletionSyncV1Request(schemas.DictSchema): Request object for a synchronous prompt completion task. """ + class MetaOapg: required = { "max_new_tokens", "temperature", "prompt", } - + class properties: - max_new_tokens = schemas.IntSchema prompt = schemas.StrSchema - - class temperature(schemas.NumberSchema): + max_new_tokens = schemas.IntSchema + + + class temperature( + schemas.NumberSchema + ): + + class MetaOapg: inclusive_maximum = 1.0 inclusive_minimum = 0.0 - - class frequency_penalty(schemas.NumberSchema): - class MetaOapg: - inclusive_maximum = 2.0 - inclusive_minimum = 0.0 - - class guided_choice(schemas.ListSchema): + + + class stop_sequences( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + class MetaOapg: items = schemas.StrSchema - + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[list, tuple, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "guided_choice": + ) -> 'stop_sequences': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - guided_grammar = schemas.StrSchema - guided_json = schemas.DictSchema - guided_regex = schemas.StrSchema - include_stop_str_in_output = schemas.BoolSchema - - class presence_penalty(schemas.NumberSchema): + + + class return_token_log_probs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'return_token_log_probs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + 
schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_maximum = 2.0 inclusive_minimum = 0.0 - - return_token_log_probs = schemas.BoolSchema - - class stop_sequences(schemas.ListSchema): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: - items = schemas.StrSchema - + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[None, decimal.Decimal, int, float, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "stop_sequences": + ) -> 'frequency_penalty': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class top_k(schemas.IntSchema): + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_minimum = -1 - - class top_p(schemas.NumberSchema): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_maximum = 1.0 - + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class include_stop_str_in_output( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'include_stop_str_in_output': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_json( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'guided_json': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_regex( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_regex': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_choice( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + 
*_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_grammar( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_grammar': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "max_new_tokens": max_new_tokens, "prompt": prompt, + "max_new_tokens": max_new_tokens, "temperature": temperature, - "frequency_penalty": frequency_penalty, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_json": guided_json, - "guided_regex": guided_regex, - "include_stop_str_in_output": include_stop_str_in_output, - "presence_penalty": presence_penalty, - "return_token_log_probs": return_token_log_probs, "stop_sequences": stop_sequences, + "return_token_log_probs": return_token_log_probs, + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, "top_k": top_k, "top_p": top_p, + "include_stop_str_in_output": include_stop_str_in_output, + "guided_json": guided_json, + "guided_regex": guided_regex, + "guided_choice": guided_choice, + "guided_grammar": guided_grammar, + "skip_special_tokens": skip_special_tokens, } - + max_new_tokens: MetaOapg.properties.max_new_tokens temperature: MetaOapg.properties.temperature prompt: MetaOapg.properties.prompt - + @typing.overload - def 
__getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: - ... - + def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: - ... - + def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: - ... - + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> MetaOapg.properties.frequency_penalty: - ... - + def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: - ... - + def __getitem__(self, name: typing_extensions.Literal["return_token_log_probs"]) -> MetaOapg.properties.return_token_log_probs: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: - ... - + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: - ... - + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> MetaOapg.properties.include_stop_str_in_output: - ... - + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: - ... - + def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> MetaOapg.properties.return_token_log_probs: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: - ... - + def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["stop_sequences"]) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_choice"] - ) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["return_token_log_probs"]) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_grammar"] - ) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_json"] - ) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_regex"] - ) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["stop_sequences"] - ) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_k"] - ) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_p"] - ) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompt", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "include_stop_str_in_output", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "skip_special_tokens", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_new_tokens: typing.Union[ - MetaOapg.properties.max_new_tokens, - decimal.Decimal, - int, - ], - temperature: typing.Union[ - MetaOapg.properties.temperature, - decimal.Decimal, - int, - float, - ], - prompt: typing.Union[ - MetaOapg.properties.prompt, - str, - ], - frequency_penalty: typing.Union[ - MetaOapg.properties.frequency_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, str, schemas.Unset] = schemas.unset, - guided_json: typing.Union[ - MetaOapg.properties.guided_json, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, str, schemas.Unset] = schemas.unset, - 
include_stop_str_in_output: typing.Union[ - MetaOapg.properties.include_stop_str_in_output, bool, schemas.Unset - ] = schemas.unset, - presence_penalty: typing.Union[ - MetaOapg.properties.presence_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - return_token_log_probs: typing.Union[ - MetaOapg.properties.return_token_log_probs, bool, schemas.Unset - ] = schemas.unset, - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + max_new_tokens: typing.Union[MetaOapg.properties.max_new_tokens, decimal.Decimal, int, ], + temperature: typing.Union[MetaOapg.properties.temperature, decimal.Decimal, int, float, ], + prompt: typing.Union[MetaOapg.properties.prompt, str, ], + stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, None, schemas.Unset] = schemas.unset, + return_token_log_probs: typing.Union[MetaOapg.properties.return_token_log_probs, None, bool, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, + guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, 
None, schemas.Unset] = schemas.unset, + guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, + guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, + guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionSyncV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionSyncV1Request': return super().__new__( cls, *_args, max_new_tokens=max_new_tokens, temperature=temperature, prompt=prompt, - frequency_penalty=frequency_penalty, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_json=guided_json, - guided_regex=guided_regex, - include_stop_str_in_output=include_stop_str_in_output, - presence_penalty=presence_penalty, - return_token_log_probs=return_token_log_probs, stop_sequences=stop_sequences, + return_token_log_probs=return_token_log_probs, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, top_k=top_k, top_p=top_p, + include_stop_str_in_output=include_stop_str_in_output, + guided_json=guided_json, + guided_regex=guided_regex, + guided_choice=guided_choice, + guided_grammar=guided_grammar, + skip_special_tokens=skip_special_tokens, _configuration=_configuration, **kwargs, ) diff --git a/launch/api_client/model/completion_sync_v1_request.pyi b/launch/api_client/model/completion_sync_v1_request.pyi deleted file mode 100644 index 
4a13ac1f..00000000 --- a/launch/api_client/model/completion_sync_v1_request.pyi +++ /dev/null @@ -1,359 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CompletionSyncV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for a synchronous prompt completion task. - """ - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompt", - } - - class properties: - max_new_tokens = schemas.IntSchema - prompt = schemas.StrSchema - - class temperature(schemas.NumberSchema): - pass - - class frequency_penalty(schemas.NumberSchema): - pass - - class guided_choice(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "guided_choice": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - guided_grammar = schemas.StrSchema - guided_json = schemas.DictSchema - guided_regex = schemas.StrSchema - include_stop_str_in_output = schemas.BoolSchema - - class presence_penalty(schemas.NumberSchema): - pass - 
return_token_log_probs = schemas.BoolSchema - - class stop_sequences(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "stop_sequences": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class top_k(schemas.IntSchema): - pass - - class top_p(schemas.NumberSchema): - pass - __annotations__ = { - "max_new_tokens": max_new_tokens, - "prompt": prompt, - "temperature": temperature, - "frequency_penalty": frequency_penalty, - "guided_choice": guided_choice, - "guided_grammar": guided_grammar, - "guided_json": guided_json, - "guided_regex": guided_regex, - "include_stop_str_in_output": include_stop_str_in_output, - "presence_penalty": presence_penalty, - "return_token_log_probs": return_token_log_probs, - "stop_sequences": stop_sequences, - "top_k": top_k, - "top_p": top_p, - } - max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompt: MetaOapg.properties.prompt - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> MetaOapg.properties.frequency_penalty: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> MetaOapg.properties.include_stop_str_in_output: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> MetaOapg.properties.presence_penalty: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> MetaOapg.properties.return_token_log_probs: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_new_tokens"] - ) -> MetaOapg.properties.max_new_tokens: ... 
- @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_choice"] - ) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_grammar"] - ) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_json"] - ) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["guided_regex"] - ) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["include_stop_str_in_output"] - ) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["stop_sequences"] - ) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_k"] - ) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_p"] - ) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompt", - "temperature", - "frequency_penalty", - "guided_choice", - "guided_grammar", - "guided_json", - "guided_regex", - "include_stop_str_in_output", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_new_tokens: typing.Union[ - MetaOapg.properties.max_new_tokens, - decimal.Decimal, - int, - ], - temperature: typing.Union[ - MetaOapg.properties.temperature, - decimal.Decimal, - int, - float, - ], - prompt: typing.Union[ - MetaOapg.properties.prompt, - str, - ], - frequency_penalty: typing.Union[ - MetaOapg.properties.frequency_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, schemas.Unset] = schemas.unset, - guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, str, schemas.Unset] = schemas.unset, - guided_json: typing.Union[ - MetaOapg.properties.guided_json, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - guided_regex: typing.Union[MetaOapg.properties.guided_regex, str, schemas.Unset] = schemas.unset, - include_stop_str_in_output: typing.Union[ - MetaOapg.properties.include_stop_str_in_output, bool, schemas.Unset - ] = schemas.unset, - presence_penalty: typing.Union[ - MetaOapg.properties.presence_penalty, 
decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - return_token_log_probs: typing.Union[ - MetaOapg.properties.return_token_log_probs, bool, schemas.Unset - ] = schemas.unset, - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionSyncV1Request": - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - prompt=prompt, - frequency_penalty=frequency_penalty, - guided_choice=guided_choice, - guided_grammar=guided_grammar, - guided_json=guided_json, - guided_regex=guided_regex, - include_stop_str_in_output=include_stop_str_in_output, - presence_penalty=presence_penalty, - return_token_log_probs=return_token_log_probs, - stop_sequences=stop_sequences, - top_k=top_k, - top_p=top_p, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/completion_sync_v1_response.py b/launch/api_client/model/completion_sync_v1_response.py index 7d77c8f0..491f81bd 100644 --- a/launch/api_client/model/completion_sync_v1_response.py +++ b/launch/api_client/model/completion_sync_v1_response.py @@ -23,114 +23,92 @@ from launch.api_client import schemas # noqa: F401 -class CompletionSyncV1Response(schemas.DictSchema): +class CompletionSyncV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - Response object for a synchronous prompt completion task. 
+ Response object for a synchronous prompt completion. """ + class MetaOapg: + class properties: + + + class request_id( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'request_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def output() -> typing.Type["CompletionOutput"]: + def output() -> typing.Type['CompletionOutput']: return CompletionOutput - - request_id = schemas.StrSchema __annotations__ = { - "output": output, "request_id": request_id, + "output": output, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output"]) -> "CompletionOutput": - ... - + def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["output"]) -> 'CompletionOutput': ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "output", - "request_id", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["request_id", "output", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["output"] - ) -> typing.Union["CompletionOutput", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["request_id"]) -> typing.Union[MetaOapg.properties.request_id, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["request_id"] - ) -> typing.Union[MetaOapg.properties.request_id, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["output"]) -> typing.Union['CompletionOutput', schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "output", - "request_id", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["request_id", "output", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - output: typing.Union["CompletionOutput", schemas.Unset] = schemas.unset, - request_id: typing.Union[MetaOapg.properties.request_id, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + request_id: typing.Union[MetaOapg.properties.request_id, None, str, schemas.Unset] = schemas.unset, + output: typing.Union['CompletionOutput', schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionSyncV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionSyncV1Response': return super().__new__( cls, *_args, - output=output, request_id=request_id, + output=output, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.completion_output import CompletionOutput diff --git 
a/launch/api_client/model/completion_sync_v1_response.pyi b/launch/api_client/model/completion_sync_v1_response.pyi deleted file mode 100644 index 911e77e9..00000000 --- a/launch/api_client/model/completion_sync_v1_response.pyi +++ /dev/null @@ -1,117 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CompletionSyncV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a synchronous prompt completion task. - """ - - class MetaOapg: - class properties: - @staticmethod - def output() -> typing.Type["CompletionOutput"]: - return CompletionOutput - request_id = schemas.StrSchema - __annotations__ = { - "output": output, - "request_id": request_id, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["output"]) -> "CompletionOutput": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["request_id"]) -> MetaOapg.properties.request_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "output", - "request_id", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["output"] - ) -> typing.Union["CompletionOutput", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["request_id"] - ) -> typing.Union[MetaOapg.properties.request_id, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "output", - "request_id", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - output: typing.Union["CompletionOutput", schemas.Unset] = schemas.unset, - request_id: typing.Union[MetaOapg.properties.request_id, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CompletionSyncV1Response": - return super().__new__( - cls, - *_args, - output=output, - request_id=request_id, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.completion_output import CompletionOutput diff --git a/launch/api_client/model/completion_tokens_details.py b/launch/api_client/model/completion_tokens_details.py new file mode 100644 index 00000000..77ad3d2c --- /dev/null +++ b/launch/api_client/model/completion_tokens_details.py @@ -0,0 +1,108 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the 
OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CompletionTokensDetails( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + accepted_prediction_tokens = schemas.IntSchema + audio_tokens = schemas.IntSchema + reasoning_tokens = schemas.IntSchema + rejected_prediction_tokens = schemas.IntSchema + __annotations__ = { + "accepted_prediction_tokens": accepted_prediction_tokens, + "audio_tokens": audio_tokens, + "reasoning_tokens": reasoning_tokens, + "rejected_prediction_tokens": rejected_prediction_tokens, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["accepted_prediction_tokens"]) -> MetaOapg.properties.accepted_prediction_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["audio_tokens"]) -> MetaOapg.properties.audio_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["reasoning_tokens"]) -> MetaOapg.properties.reasoning_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["rejected_prediction_tokens"]) -> MetaOapg.properties.rejected_prediction_tokens: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["accepted_prediction_tokens", "audio_tokens", "reasoning_tokens", "rejected_prediction_tokens", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["accepted_prediction_tokens"]) -> typing.Union[MetaOapg.properties.accepted_prediction_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["audio_tokens"]) -> typing.Union[MetaOapg.properties.audio_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["reasoning_tokens"]) -> typing.Union[MetaOapg.properties.reasoning_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["rejected_prediction_tokens"]) -> typing.Union[MetaOapg.properties.rejected_prediction_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["accepted_prediction_tokens", "audio_tokens", "reasoning_tokens", "rejected_prediction_tokens", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + accepted_prediction_tokens: typing.Union[MetaOapg.properties.accepted_prediction_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, + audio_tokens: typing.Union[MetaOapg.properties.audio_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, + reasoning_tokens: typing.Union[MetaOapg.properties.reasoning_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, + rejected_prediction_tokens: typing.Union[MetaOapg.properties.rejected_prediction_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionTokensDetails': + return super().__new__( + cls, + *_args, + accepted_prediction_tokens=accepted_prediction_tokens, + audio_tokens=audio_tokens, + reasoning_tokens=reasoning_tokens, + rejected_prediction_tokens=rejected_prediction_tokens, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/completion_usage.py b/launch/api_client/model/completion_usage.py new file mode 100644 index 00000000..b064817e --- /dev/null +++ b/launch/api_client/model/completion_usage.py @@ -0,0 +1,138 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import 
uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CompletionUsage( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "completion_tokens", + "prompt_tokens", + "total_tokens", + } + + class properties: + completion_tokens = schemas.IntSchema + prompt_tokens = schemas.IntSchema + total_tokens = schemas.IntSchema + + @staticmethod + def completion_tokens_details() -> typing.Type['CompletionTokensDetails']: + return CompletionTokensDetails + + @staticmethod + def prompt_tokens_details() -> typing.Type['PromptTokensDetails']: + return PromptTokensDetails + __annotations__ = { + "completion_tokens": completion_tokens, + "prompt_tokens": prompt_tokens, + "total_tokens": total_tokens, + "completion_tokens_details": completion_tokens_details, + "prompt_tokens_details": prompt_tokens_details, + } + + completion_tokens: MetaOapg.properties.completion_tokens + prompt_tokens: MetaOapg.properties.prompt_tokens + total_tokens: MetaOapg.properties.total_tokens + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["completion_tokens"]) -> MetaOapg.properties.completion_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prompt_tokens"]) -> MetaOapg.properties.prompt_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["total_tokens"]) -> MetaOapg.properties.total_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["completion_tokens_details"]) -> 'CompletionTokensDetails': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prompt_tokens_details"]) -> 'PromptTokensDetails': ... 
+ + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["completion_tokens", "prompt_tokens", "total_tokens", "completion_tokens_details", "prompt_tokens_details", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["completion_tokens"]) -> MetaOapg.properties.completion_tokens: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prompt_tokens"]) -> MetaOapg.properties.prompt_tokens: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["total_tokens"]) -> MetaOapg.properties.total_tokens: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["completion_tokens_details"]) -> typing.Union['CompletionTokensDetails', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prompt_tokens_details"]) -> typing.Union['PromptTokensDetails', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["completion_tokens", "prompt_tokens", "total_tokens", "completion_tokens_details", "prompt_tokens_details", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + completion_tokens: typing.Union[MetaOapg.properties.completion_tokens, decimal.Decimal, int, ], + prompt_tokens: typing.Union[MetaOapg.properties.prompt_tokens, decimal.Decimal, int, ], + total_tokens: typing.Union[MetaOapg.properties.total_tokens, decimal.Decimal, int, ], + completion_tokens_details: typing.Union['CompletionTokensDetails', schemas.Unset] = schemas.unset, + prompt_tokens_details: typing.Union['PromptTokensDetails', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionUsage': + return super().__new__( + cls, + *_args, + completion_tokens=completion_tokens, + prompt_tokens=prompt_tokens, + total_tokens=total_tokens, + completion_tokens_details=completion_tokens_details, + prompt_tokens_details=prompt_tokens_details, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.completion_tokens_details import ( + CompletionTokensDetails, +) +from launch.api_client.model.prompt_tokens_details import PromptTokensDetails diff --git a/launch/api_client/model/completion_v2_request.py b/launch/api_client/model/completion_v2_request.py new file mode 100644 index 00000000..674ccb64 --- /dev/null +++ b/launch/api_client/model/completion_v2_request.py @@ -0,0 +1,1278 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: 
https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CompletionV2Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "model", + "prompt", + } + + class properties: + model = schemas.StrSchema + + + class prompt( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + + class any_of_1( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'any_of_1': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + Prompt, + Prompt1, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class best_of( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 20 + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'best_of': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = -1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_p': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class use_beam_search( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'use_beam_search': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class length_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'length_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class repetition_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'repetition_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class early_stopping( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'early_stopping': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stop_token_ids( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stop_token_ids': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class include_stop_str_in_output( + schemas.BoolBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'include_stop_str_in_output': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ignore_eos( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ignore_eos': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class spaces_between_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'spaces_between_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class add_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 
'add_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class response_format( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + ResponseFormatText, + ResponseFormatJsonSchema, + ResponseFormatJsonObject, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'response_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_json( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'guided_json': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_regex( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_regex': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_choice( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_grammar( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_grammar': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_decoding_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_decoding_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_whitespace_pattern( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_whitespace_pattern': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class echo( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'echo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'frequency_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class logit_bias( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.IntSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], + ) -> 'logit_bias': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class logprobs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 5 + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + 
) -> 'logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class n( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 128 + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'n': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stop() -> typing.Type['StopConfiguration']: + return StopConfiguration + + + class stream( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + 
_configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stream': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stream_options() -> typing.Type['ChatCompletionStreamOptions']: + return ChatCompletionStreamOptions + + + class suffix( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'suffix': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class temperature( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'temperature': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 1.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class user( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'user': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "model": model, + "prompt": prompt, + "best_of": best_of, + "top_k": top_k, + "min_p": min_p, + "use_beam_search": use_beam_search, + "length_penalty": 
length_penalty, + "repetition_penalty": repetition_penalty, + "early_stopping": early_stopping, + "stop_token_ids": stop_token_ids, + "include_stop_str_in_output": include_stop_str_in_output, + "ignore_eos": ignore_eos, + "min_tokens": min_tokens, + "skip_special_tokens": skip_special_tokens, + "spaces_between_special_tokens": spaces_between_special_tokens, + "add_special_tokens": add_special_tokens, + "response_format": response_format, + "guided_json": guided_json, + "guided_regex": guided_regex, + "guided_choice": guided_choice, + "guided_grammar": guided_grammar, + "guided_decoding_backend": guided_decoding_backend, + "guided_whitespace_pattern": guided_whitespace_pattern, + "echo": echo, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_tokens": max_tokens, + "n": n, + "presence_penalty": presence_penalty, + "seed": seed, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "suffix": suffix, + "temperature": temperature, + "top_p": top_p, + "user": user, + } + + model: MetaOapg.properties.model + prompt: MetaOapg.properties.prompt + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["suffix"]) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + model: typing.Union[MetaOapg.properties.model, str, ], + prompt: typing.Union[MetaOapg.properties.prompt, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, + length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + early_stopping: typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, + stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, 
schemas.Unset] = schemas.unset, + include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, + ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, + min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, + spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, + add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, + response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, + guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, + guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, + guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, + guided_whitespace_pattern: typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, + echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + logit_bias: 
typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + logprobs: typing.Union[MetaOapg.properties.logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, + stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, + stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, + suffix: typing.Union[MetaOapg.properties.suffix, None, str, schemas.Unset] = schemas.unset, + temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionV2Request': + return super().__new__( + cls, + *_args, + model=model, + prompt=prompt, + best_of=best_of, + top_k=top_k, + min_p=min_p, + use_beam_search=use_beam_search, + length_penalty=length_penalty, + repetition_penalty=repetition_penalty, + early_stopping=early_stopping, + stop_token_ids=stop_token_ids, + include_stop_str_in_output=include_stop_str_in_output, 
+ ignore_eos=ignore_eos, + min_tokens=min_tokens, + skip_special_tokens=skip_special_tokens, + spaces_between_special_tokens=spaces_between_special_tokens, + add_special_tokens=add_special_tokens, + response_format=response_format, + guided_json=guided_json, + guided_regex=guided_regex, + guided_choice=guided_choice, + guided_grammar=guided_grammar, + guided_decoding_backend=guided_decoding_backend, + guided_whitespace_pattern=guided_whitespace_pattern, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + max_tokens=max_tokens, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + stream=stream, + stream_options=stream_options, + suffix=suffix, + temperature=temperature, + top_p=top_p, + user=user, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_stream_options import ( + ChatCompletionStreamOptions, +) +from launch.api_client.model.prompt import Prompt +from launch.api_client.model.prompt1 import Prompt1 +from launch.api_client.model.response_format_json_object import ( + ResponseFormatJsonObject, +) +from launch.api_client.model.response_format_json_schema import ( + ResponseFormatJsonSchema, +) +from launch.api_client.model.response_format_text import ResponseFormatText +from launch.api_client.model.stop_configuration import StopConfiguration diff --git a/launch/api_client/model/completion_v2_stream_error_chunk.py b/launch/api_client/model/completion_v2_stream_error_chunk.py new file mode 100644 index 00000000..085b8335 --- /dev/null +++ b/launch/api_client/model/completion_v2_stream_error_chunk.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 
+import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CompletionV2StreamErrorChunk( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "error", + } + + class properties: + + @staticmethod + def error() -> typing.Type['StreamError']: + return StreamError + __annotations__ = { + "error": error, + } + + error: 'StreamError' + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["error", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> 'StreamError': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["error", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + error: 'StreamError', + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CompletionV2StreamErrorChunk': + return super().__new__( + cls, + *_args, + error=error, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.stream_error import StreamError diff --git a/launch/api_client/model/content.py b/launch/api_client/model/content.py new file mode 100644 index 00000000..d444926a --- /dev/null +++ b/launch/api_client/model/content.py @@ -0,0 +1,62 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Content( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`. 
+ """ + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestAssistantMessageContentPart']: + return ChatCompletionRequestAssistantMessageContentPart + min_items = 1 + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + +from launch.api_client.model.chat_completion_request_assistant_message_content_part import ( + ChatCompletionRequestAssistantMessageContentPart, +) diff --git a/launch/api_client/model/content1.py b/launch/api_client/model/content1.py new file mode 100644 index 00000000..ccd7efd3 --- /dev/null +++ b/launch/api_client/model/content1.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Content1( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + An array of content parts with a defined type. For developer messages, only type `text` is supported. 
+ """ + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: + return ChatCompletionRequestMessageContentPartText + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Content1': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) diff --git a/launch/api_client/model/content2.py b/launch/api_client/model/content2.py new file mode 100644 index 00000000..c103bcb7 --- /dev/null +++ b/launch/api_client/model/content2.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Content2( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + An array of content parts with a defined type. For system messages, only type `text` is supported. 
+ """ + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: + return ChatCompletionRequestMessageContentPartText + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Content2': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) diff --git a/launch/api_client/model/content3.py b/launch/api_client/model/content3.py new file mode 100644 index 00000000..95d71f60 --- /dev/null +++ b/launch/api_client/model/content3.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Content3( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + An array of content parts with a defined type. For tool messages, only type `text` is supported. 
+ """ + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: + return ChatCompletionRequestMessageContentPartText + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Content3': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) diff --git a/launch/api_client/model/content4.py b/launch/api_client/model/content4.py new file mode 100644 index 00000000..313ebc22 --- /dev/null +++ b/launch/api_client/model/content4.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Content4( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs. 
+ """ + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestUserMessageContentPart']: + return ChatCompletionRequestUserMessageContentPart + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestUserMessageContentPart'], typing.List['ChatCompletionRequestUserMessageContentPart']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Content4': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestUserMessageContentPart': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_request_user_message_content_part import ( + ChatCompletionRequestUserMessageContentPart, +) diff --git a/launch/api_client/model/content8.py b/launch/api_client/model/content8.py new file mode 100644 index 00000000..9edcdaef --- /dev/null +++ b/launch/api_client/model/content8.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Content8( + schemas.ListSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs. 
+ """ + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestMessageContentPartText']: + return ChatCompletionRequestMessageContentPartText + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessageContentPartText'], typing.List['ChatCompletionRequestMessageContentPartText']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Content8': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestMessageContentPartText': + return super().__getitem__(i) + +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) diff --git a/launch/api_client/model/create_async_task_v1_response.py b/launch/api_client/model/create_async_task_v1_response.py index 83b2baaf..87e7482b 100644 --- a/launch/api_client/model/create_async_task_v1_response.py +++ b/launch/api_client/model/create_async_task_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateAsyncTaskV1Response(schemas.DictSchema): +class CreateAsyncTaskV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "task_id", } - + class properties: task_id = schemas.StrSchema __annotations__ = { "task_id": task_id, } - + task_id: MetaOapg.properties.task_id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["task_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["task_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["task_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["task_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - task_id: typing.Union[ - MetaOapg.properties.task_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + task_id: typing.Union[MetaOapg.properties.task_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateAsyncTaskV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateAsyncTaskV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_async_task_v1_response.pyi b/launch/api_client/model/create_async_task_v1_response.pyi deleted file mode 100644 index 
f5577ea5..00000000 --- a/launch/api_client/model/create_async_task_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateAsyncTaskV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "task_id", - } - - class properties: - task_id = schemas.StrSchema - __annotations__ = { - "task_id": task_id, - } - task_id: MetaOapg.properties.task_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["task_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["task_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - task_id: typing.Union[ - MetaOapg.properties.task_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateAsyncTaskV1Response": - return super().__new__( - cls, - *_args, - task_id=task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_model_config.py b/launch/api_client/model/create_batch_completions_model_config.py index 67fd8f88..73ec228c 100644 --- a/launch/api_client/model/create_batch_completions_model_config.py +++ b/launch/api_client/model/create_batch_completions_model_config.py @@ -43,14 +43,18 @@ class MetaOapg: def __getitem__( self, - name: typing.Union[str,], + name: typing.Union[ + str, + ], ) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) def get_item_oapg( self, - name: typing.Union[str,], + name: typing.Union[ + str, + ], ) -> MetaOapg.additional_properties: return super().get_item_oapg(name) diff --git a/launch/api_client/model/create_batch_completions_model_config.pyi b/launch/api_client/model/create_batch_completions_model_config.pyi deleted file mode 100644 index f8cd67ea..00000000 --- a/launch/api_client/model/create_batch_completions_model_config.pyi +++ /dev/null @@ -1,208 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 
-import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateBatchCompletionsModelConfig(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "model", - "labels", - } - - class properties: - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - model = schemas.StrSchema - checkpoint_path = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - seed = schemas.IntSchema - __annotations__ = { - "labels": labels, - "model": model, - "checkpoint_path": checkpoint_path, - "num_shards": num_shards, - "quantize": quantize, - "seed": seed, - } - model: MetaOapg.properties.model - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> MetaOapg.properties.checkpoint_path: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "model", - "checkpoint_path", - "num_shards", - "quantize", - "seed", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["quantize"] - ) -> typing.Union["Quantization", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["seed"] - ) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "model", - "checkpoint_path", - "num_shards", - "quantize", - "seed", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model: typing.Union[ - MetaOapg.properties.model, - str, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - seed: typing.Union[MetaOapg.properties.seed, decimal.Decimal, int, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsModelConfig": - return super().__new__( - cls, - *_args, - model=model, - labels=labels, - checkpoint_path=checkpoint_path, - num_shards=num_shards, - quantize=quantize, - seed=seed, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_batch_completions_request.pyi b/launch/api_client/model/create_batch_completions_request.pyi deleted file mode 100644 index 40100f37..00000000 --- a/launch/api_client/model/create_batch_completions_request.pyi +++ /dev/null @@ -1,213 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import 
decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateBatchCompletionsRequest(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for batch completions. - """ - - class MetaOapg: - required = { - "model_config", - "output_data_path", - } - - class properties: - @staticmethod - def model_config() -> typing.Type["CreateBatchCompletionsModelConfig"]: - return CreateBatchCompletionsModelConfig - output_data_path = schemas.StrSchema - - @staticmethod - def content() -> typing.Type["CreateBatchCompletionsRequestContent"]: - return CreateBatchCompletionsRequestContent - - class data_parallelism(schemas.IntSchema): - pass - input_data_path = schemas.StrSchema - - class max_runtime_sec(schemas.IntSchema): - pass - @staticmethod - def tool_config() -> typing.Type["ToolConfig"]: - return ToolConfig - __annotations__ = { - "model_config": model_config, - "output_data_path": output_data_path, - "content": content, - "data_parallelism": data_parallelism, - "input_data_path": input_data_path, - "max_runtime_sec": max_runtime_sec, - "tool_config": tool_config, - } - model_config: "CreateBatchCompletionsModelConfig" - output_data_path: MetaOapg.properties.output_data_path - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> "CreateBatchCompletionsModelConfig": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["output_data_path"] - ) -> MetaOapg.properties.output_data_path: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> "CreateBatchCompletionsRequestContent": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["data_parallelism"] - ) -> MetaOapg.properties.data_parallelism: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["input_data_path"] - ) -> MetaOapg.properties.input_data_path: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["max_runtime_sec"] - ) -> MetaOapg.properties.max_runtime_sec: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tool_config"]) -> "ToolConfig": ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_config", - "output_data_path", - "content", - "data_parallelism", - "input_data_path", - "max_runtime_sec", - "tool_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> "CreateBatchCompletionsModelConfig": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["output_data_path"] - ) -> MetaOapg.properties.output_data_path: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["content"] - ) -> typing.Union["CreateBatchCompletionsRequestContent", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["data_parallelism"] - ) -> typing.Union[MetaOapg.properties.data_parallelism, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["input_data_path"] - ) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_runtime_sec"] - ) -> typing.Union[MetaOapg.properties.max_runtime_sec, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tool_config"] - ) -> typing.Union["ToolConfig", schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_config", - "output_data_path", - "content", - "data_parallelism", - "input_data_path", - "max_runtime_sec", - "tool_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_config: "CreateBatchCompletionsModelConfig", - output_data_path: typing.Union[ - MetaOapg.properties.output_data_path, - str, - ], - content: typing.Union["CreateBatchCompletionsRequestContent", schemas.Unset] = schemas.unset, - data_parallelism: typing.Union[ - MetaOapg.properties.data_parallelism, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - input_data_path: typing.Union[MetaOapg.properties.input_data_path, str, schemas.Unset] = schemas.unset, - max_runtime_sec: typing.Union[ - MetaOapg.properties.max_runtime_sec, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - tool_config: typing.Union["ToolConfig", schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsRequest": - return super().__new__( - cls, - *_args, - model_config=model_config, - output_data_path=output_data_path, - content=content, - data_parallelism=data_parallelism, - input_data_path=input_data_path, - 
max_runtime_sec=max_runtime_sec, - tool_config=tool_config, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.create_batch_completions_model_config import ( - CreateBatchCompletionsModelConfig, -) -from launch_client.model.create_batch_completions_request_content import ( - CreateBatchCompletionsRequestContent, -) -from launch_client.model.tool_config import ToolConfig diff --git a/launch/api_client/model/create_batch_completions_request_content.pyi b/launch/api_client/model/create_batch_completions_request_content.pyi deleted file mode 100644 index 231dcabb..00000000 --- a/launch/api_client/model/create_batch_completions_request_content.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateBatchCompletionsRequestContent(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "max_new_tokens", - "temperature", - "prompts", - } - - class properties: - max_new_tokens = schemas.IntSchema - - class prompts(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "prompts": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class temperature(schemas.NumberSchema): - pass - - class frequency_penalty(schemas.NumberSchema): - pass - - class presence_penalty(schemas.NumberSchema): - pass - return_token_log_probs = schemas.BoolSchema - - class stop_sequences(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "stop_sequences": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class top_k(schemas.IntSchema): - pass - - class top_p(schemas.NumberSchema): - pass - __annotations__ = { - "max_new_tokens": max_new_tokens, - "prompts": prompts, - "temperature": temperature, - "frequency_penalty": frequency_penalty, - "presence_penalty": presence_penalty, - "return_token_log_probs": return_token_log_probs, - "stop_sequences": stop_sequences, - "top_k": top_k, - "top_p": top_p, - } - max_new_tokens: MetaOapg.properties.max_new_tokens - temperature: MetaOapg.properties.temperature - prompts: MetaOapg.properties.prompts - - @typing.overload - def __getitem__(self, name: 
typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> MetaOapg.properties.frequency_penalty: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> MetaOapg.properties.presence_penalty: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> MetaOapg.properties.return_token_log_probs: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompts", - "temperature", - "frequency_penalty", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_new_tokens"] - ) -> MetaOapg.properties.max_new_tokens: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["frequency_penalty"] - ) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["presence_penalty"] - ) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_token_log_probs"] - ) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["stop_sequences"] - ) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_k"] - ) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["top_p"] - ) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_new_tokens", - "prompts", - "temperature", - "frequency_penalty", - "presence_penalty", - "return_token_log_probs", - "stop_sequences", - "top_k", - "top_p", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_new_tokens: typing.Union[ - MetaOapg.properties.max_new_tokens, - decimal.Decimal, - int, - ], - temperature: typing.Union[ - MetaOapg.properties.temperature, - decimal.Decimal, - int, - float, - ], - prompts: typing.Union[ - MetaOapg.properties.prompts, - list, - tuple, - ], - frequency_penalty: typing.Union[ - MetaOapg.properties.frequency_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - presence_penalty: typing.Union[ - MetaOapg.properties.presence_penalty, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - return_token_log_probs: typing.Union[ - MetaOapg.properties.return_token_log_probs, bool, schemas.Unset - ] = schemas.unset, - stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, schemas.Unset] = schemas.unset, - top_k: typing.Union[MetaOapg.properties.top_k, decimal.Decimal, int, schemas.Unset] = schemas.unset, - top_p: typing.Union[MetaOapg.properties.top_p, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsRequestContent": - return super().__new__( - cls, - *_args, - max_new_tokens=max_new_tokens, - temperature=temperature, - prompts=prompts, - frequency_penalty=frequency_penalty, - presence_penalty=presence_penalty, - return_token_log_probs=return_token_log_probs, - stop_sequences=stop_sequences, - 
top_k=top_k, - top_p=top_p, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_batch_completions_response.py b/launch/api_client/model/create_batch_completions_response.py index b34b09bc..26631968 100644 --- a/launch/api_client/model/create_batch_completions_response.py +++ b/launch/api_client/model/create_batch_completions_response.py @@ -54,7 +54,9 @@ def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: def __getitem__( self, name: typing.Union[ - typing_extensions.Literal["job_id",], + typing_extensions.Literal[ + "job_id", + ], str, ], ): @@ -72,7 +74,9 @@ def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, s def get_item_oapg( self, name: typing.Union[ - typing_extensions.Literal["job_id",], + typing_extensions.Literal[ + "job_id", + ], str, ], ): diff --git a/launch/api_client/model/create_batch_completions_v1_model_config.py b/launch/api_client/model/create_batch_completions_v1_model_config.py new file mode 100644 index 00000000..08ec16ae --- /dev/null +++ b/launch/api_client/model/create_batch_completions_v1_model_config.py @@ -0,0 +1,1328 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateBatchCompletionsV1ModelConfig( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "model", + } + + class properties: + model = schemas.StrSchema + + + class max_model_len( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_model_len': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_seqs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_seqs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enforce_eager( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enforce_eager': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class trust_remote_code( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'trust_remote_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class pipeline_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'pipeline_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tensor_parallel_size( + schemas.IntBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tensor_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_log_requests( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_log_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tool_call_parser( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_call_parser': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_auto_tool_choice( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_auto_tool_choice': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class config_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'config_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer_mode( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_mode': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class limit_mm_per_prompt( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'limit_mm_per_prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_batched_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_batched_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class code_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'code_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class rope_scaling( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return 
super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'rope_scaling': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class tokenizer_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization_param_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization_param_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_seq_len_to_capture( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_seq_len_to_capture': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_sliding_window( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_sliding_window': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class 
skip_tokenizer_init( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_tokenizer_init': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class served_model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'served_model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class override_neuron_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'override_neuron_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class mm_processor_kwargs( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return 
super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'mm_processor_kwargs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class block_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'block_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class gpu_memory_utilization( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpu_memory_utilization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class swap_space( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'swap_space': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cache_dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) 
-> 'cache_dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_gpu_blocks_override( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_gpu_blocks_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_prefix_caching( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_prefix_caching': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_context_length( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_context_length': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class response_role( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'response_role': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + __annotations__ = { + "model": model, + "max_model_len": max_model_len, + "max_num_seqs": max_num_seqs, + "enforce_eager": enforce_eager, + "trust_remote_code": trust_remote_code, + "pipeline_parallel_size": pipeline_parallel_size, + "tensor_parallel_size": tensor_parallel_size, + "quantization": quantization, + "disable_log_requests": disable_log_requests, + "chat_template": chat_template, + "tool_call_parser": tool_call_parser, + "enable_auto_tool_choice": enable_auto_tool_choice, + "load_format": load_format, + "config_format": config_format, + "tokenizer_mode": tokenizer_mode, + "limit_mm_per_prompt": limit_mm_per_prompt, + "max_num_batched_tokens": max_num_batched_tokens, + "tokenizer": tokenizer, + "dtype": dtype, + "seed": seed, + "revision": revision, + "code_revision": code_revision, + "rope_scaling": rope_scaling, + "tokenizer_revision": tokenizer_revision, + 
"quantization_param_path": quantization_param_path, + "max_seq_len_to_capture": max_seq_len_to_capture, + "disable_sliding_window": disable_sliding_window, + "skip_tokenizer_init": skip_tokenizer_init, + "served_model_name": served_model_name, + "override_neuron_config": override_neuron_config, + "mm_processor_kwargs": mm_processor_kwargs, + "block_size": block_size, + "gpu_memory_utilization": gpu_memory_utilization, + "swap_space": swap_space, + "cache_dtype": cache_dtype, + "num_gpu_blocks_override": num_gpu_blocks_override, + "enable_prefix_caching": enable_prefix_caching, + "checkpoint_path": checkpoint_path, + "num_shards": num_shards, + "max_context_length": max_context_length, + "response_role": response_role, + "labels": labels, + } + + model: MetaOapg.properties.model + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_context_length"]) -> MetaOapg.properties.max_context_length: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["response_role"]) -> MetaOapg.properties.response_role: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", "labels", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_context_length"]) -> typing.Union[MetaOapg.properties.max_context_length, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["response_role"]) -> typing.Union[MetaOapg.properties.response_role, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "checkpoint_path", "num_shards", "max_context_length", "response_role", "labels", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + model: typing.Union[MetaOapg.properties.model, str, ], + max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, 
None, bool, schemas.Unset] = schemas.unset, + trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, + pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, + disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, None, bool, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, + enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, + load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, + config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, + tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, + limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, + max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, + dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = 
schemas.unset, + code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, + rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + tokenizer_revision: typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, + quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, + max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, + skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, + served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, + override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, + num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enable_prefix_caching: 
typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_context_length: typing.Union[MetaOapg.properties.max_context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + response_role: typing.Union[MetaOapg.properties.response_role, None, str, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchCompletionsV1ModelConfig': + return super().__new__( + cls, + *_args, + model=model, + max_model_len=max_model_len, + max_num_seqs=max_num_seqs, + enforce_eager=enforce_eager, + trust_remote_code=trust_remote_code, + pipeline_parallel_size=pipeline_parallel_size, + tensor_parallel_size=tensor_parallel_size, + quantization=quantization, + disable_log_requests=disable_log_requests, + chat_template=chat_template, + tool_call_parser=tool_call_parser, + enable_auto_tool_choice=enable_auto_tool_choice, + load_format=load_format, + config_format=config_format, + tokenizer_mode=tokenizer_mode, + limit_mm_per_prompt=limit_mm_per_prompt, + max_num_batched_tokens=max_num_batched_tokens, + tokenizer=tokenizer, + dtype=dtype, + seed=seed, + revision=revision, + code_revision=code_revision, + rope_scaling=rope_scaling, + tokenizer_revision=tokenizer_revision, + quantization_param_path=quantization_param_path, + max_seq_len_to_capture=max_seq_len_to_capture, + disable_sliding_window=disable_sliding_window, + skip_tokenizer_init=skip_tokenizer_init, + 
served_model_name=served_model_name, + override_neuron_config=override_neuron_config, + mm_processor_kwargs=mm_processor_kwargs, + block_size=block_size, + gpu_memory_utilization=gpu_memory_utilization, + swap_space=swap_space, + cache_dtype=cache_dtype, + num_gpu_blocks_override=num_gpu_blocks_override, + enable_prefix_caching=enable_prefix_caching, + checkpoint_path=checkpoint_path, + num_shards=num_shards, + max_context_length=max_context_length, + response_role=response_role, + labels=labels, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/create_batch_completions_v1_request.py b/launch/api_client/model/create_batch_completions_v1_request.py new file mode 100644 index 00000000..f2e00cdb --- /dev/null +++ b/launch/api_client/model/create_batch_completions_v1_request.py @@ -0,0 +1,520 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateBatchCompletionsV1Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Request object for batch completions. 
+ """ + + + class MetaOapg: + required = { + "model_config", + "output_data_path", + } + + class properties: + output_data_path = schemas.StrSchema + + @staticmethod + def model_config() -> typing.Type['CreateBatchCompletionsV1ModelConfig']: + return CreateBatchCompletionsV1ModelConfig + + + class input_data_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'input_data_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class data_parallelism( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 64 + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'data_parallelism': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_runtime_sec( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 172800 + inclusive_minimum = 1 + + + def 
__new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_runtime_sec': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class priority( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def tool_config() -> typing.Type['ToolConfig']: + return ToolConfig + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def content() -> typing.Type['CreateBatchCompletionsV1RequestContent']: + return CreateBatchCompletionsV1RequestContent + __annotations__ = { + "output_data_path": output_data_path, + "model_config": model_config, + "input_data_path": input_data_path, + "labels": labels, + "data_parallelism": data_parallelism, + "max_runtime_sec": max_runtime_sec, + "priority": priority, + "tool_config": tool_config, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "content": content, + } + + model_config: 'CreateBatchCompletionsV1ModelConfig' + output_data_path: MetaOapg.properties.output_data_path + + @typing.overload + def __getitem__(self, name: 
typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'CreateBatchCompletionsV1ModelConfig': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["data_parallelism"]) -> MetaOapg.properties.data_parallelism: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_runtime_sec"]) -> MetaOapg.properties.max_runtime_sec: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_config"]) -> 'ToolConfig': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> 'CreateBatchCompletionsV1RequestContent': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'CreateBatchCompletionsV1ModelConfig': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["data_parallelism"]) -> typing.Union[MetaOapg.properties.data_parallelism, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_runtime_sec"]) -> typing.Union[MetaOapg.properties.max_runtime_sec, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_config"]) -> typing.Union['ToolConfig', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union['CreateBatchCompletionsV1RequestContent', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + model_config: 'CreateBatchCompletionsV1ModelConfig', + output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], + input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, + data_parallelism: typing.Union[MetaOapg.properties.data_parallelism, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_runtime_sec: typing.Union[MetaOapg.properties.max_runtime_sec, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + priority: typing.Union[MetaOapg.properties.priority, 
None, str, schemas.Unset] = schemas.unset, + tool_config: typing.Union['ToolConfig', schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + content: typing.Union['CreateBatchCompletionsV1RequestContent', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchCompletionsV1Request': + return super().__new__( + cls, + *_args, + model_config=model_config, + output_data_path=output_data_path, + input_data_path=input_data_path, + labels=labels, + data_parallelism=data_parallelism, + max_runtime_sec=max_runtime_sec, + priority=priority, + tool_config=tool_config, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + content=content, + _configuration=_configuration, + **kwargs, + ) + +from 
launch.api_client.model.create_batch_completions_v1_model_config import ( + CreateBatchCompletionsV1ModelConfig, +) +from launch.api_client.model.create_batch_completions_v1_request_content import ( + CreateBatchCompletionsV1RequestContent, +) +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.tool_config import ToolConfig diff --git a/launch/api_client/model/create_batch_completions_v1_request_content.py b/launch/api_client/model/create_batch_completions_v1_request_content.py new file mode 100644 index 00000000..9501d737 --- /dev/null +++ b/launch/api_client/model/create_batch_completions_v1_request_content.py @@ -0,0 +1,363 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateBatchCompletionsV1RequestContent( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "max_new_tokens", + "temperature", + "prompts", + } + + class properties: + + + class prompts( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prompts': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + max_new_tokens = schemas.IntSchema + + + class temperature( + schemas.NumberSchema + ): + + + class MetaOapg: + inclusive_maximum = 1.0 + inclusive_minimum = 0.0 + + + class stop_sequences( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stop_sequences': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class return_token_log_probs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'return_token_log_probs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) 
+ + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'frequency_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = -1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 1.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "prompts": prompts, + "max_new_tokens": max_new_tokens, + "temperature": temperature, + "stop_sequences": stop_sequences, + "return_token_log_probs": return_token_log_probs, + "presence_penalty": presence_penalty, + "frequency_penalty": frequency_penalty, + "top_k": top_k, + "top_p": top_p, + "skip_special_tokens": skip_special_tokens, + } + + 
max_new_tokens: MetaOapg.properties.max_new_tokens + temperature: MetaOapg.properties.temperature + prompts: MetaOapg.properties.prompts + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop_sequences"]) -> MetaOapg.properties.stop_sequences: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["return_token_log_probs"]) -> MetaOapg.properties.return_token_log_probs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompts", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "skip_special_tokens", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prompts"]) -> MetaOapg.properties.prompts: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_new_tokens"]) -> MetaOapg.properties.max_new_tokens: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop_sequences"]) -> typing.Union[MetaOapg.properties.stop_sequences, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["return_token_log_probs"]) -> typing.Union[MetaOapg.properties.return_token_log_probs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompts", "max_new_tokens", "temperature", "stop_sequences", "return_token_log_probs", "presence_penalty", "frequency_penalty", "top_k", "top_p", "skip_special_tokens", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + max_new_tokens: typing.Union[MetaOapg.properties.max_new_tokens, decimal.Decimal, int, ], + temperature: typing.Union[MetaOapg.properties.temperature, decimal.Decimal, int, float, ], + prompts: typing.Union[MetaOapg.properties.prompts, list, tuple, ], + stop_sequences: typing.Union[MetaOapg.properties.stop_sequences, list, tuple, None, schemas.Unset] = schemas.unset, + return_token_log_probs: typing.Union[MetaOapg.properties.return_token_log_probs, None, bool, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchCompletionsV1RequestContent': + return super().__new__( + cls, + *_args, + max_new_tokens=max_new_tokens, + temperature=temperature, + 
prompts=prompts, + stop_sequences=stop_sequences, + return_token_log_probs=return_token_log_probs, + presence_penalty=presence_penalty, + frequency_penalty=frequency_penalty, + top_k=top_k, + top_p=top_p, + skip_special_tokens=skip_special_tokens, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/create_batch_job_v1_response.pyi b/launch/api_client/model/create_batch_completions_v1_response.py similarity index 65% rename from launch/api_client/model/create_batch_job_v1_response.pyi rename to launch/api_client/model/create_batch_completions_v1_response.py index fe85812f..93385a5d 100644 --- a/launch/api_client/model/create_batch_job_v1_response.pyi +++ b/launch/api_client/model/create_batch_completions_v1_response.py @@ -19,80 +19,61 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class CreateBatchJobV1Response(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class CreateBatchCompletionsV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "job_id", } - + class properties: job_id = schemas.StrSchema __annotations__ = { "job_id": job_id, } + job_id: MetaOapg.properties.job_id - + @typing.overload def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... 
+ @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - job_id: typing.Union[ - MetaOapg.properties.job_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + job_id: typing.Union[MetaOapg.properties.job_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchCompletionsV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_batch_completions_v2_request.py b/launch/api_client/model/create_batch_completions_v2_request.py new file mode 100644 index 00000000..efd81e88 --- /dev/null +++ b/launch/api_client/model/create_batch_completions_v2_request.py @@ -0,0 +1,612 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from 
launch.api_client import schemas # noqa: F401 + + +class CreateBatchCompletionsV2Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Request object for batch completions. + """ + + + class MetaOapg: + required = { + "model_config", + "output_data_path", + } + + class properties: + output_data_path = schemas.StrSchema + + @staticmethod + def model_config() -> typing.Type['BatchCompletionsModelConfig']: + return BatchCompletionsModelConfig + + + class input_data_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'input_data_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class data_parallelism( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 64 + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'data_parallelism': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_runtime_sec( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 172800 + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_runtime_sec': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class priority( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def tool_config() -> typing.Type['ToolConfig']: + return ToolConfig + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + + class any_of_1( + schemas.ListSchema + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['FilteredCompletionV2Request']: + return FilteredCompletionV2Request + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['FilteredCompletionV2Request'], typing.List['FilteredCompletionV2Request']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'any_of_1': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'FilteredCompletionV2Request': + return super().__getitem__(i) + + + class any_of_2( + schemas.ListSchema + ): + + + class MetaOapg: + + @staticmethod + def items() -> 
typing.Type['FilteredChatCompletionV2Request']: + return FilteredChatCompletionV2Request + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['FilteredChatCompletionV2Request'], typing.List['FilteredChatCompletionV2Request']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'any_of_2': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'FilteredChatCompletionV2Request': + return super().__getitem__(i) + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + CreateBatchCompletionsV1RequestContent, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + __annotations__ = { + "output_data_path": output_data_path, + "model_config": model_config, + "input_data_path": input_data_path, + "labels": labels, + "data_parallelism": data_parallelism, + "max_runtime_sec": max_runtime_sec, + "priority": priority, + "tool_config": tool_config, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": 
nodes_per_worker, + "content": content, + } + + model_config: 'BatchCompletionsModelConfig' + output_data_path: MetaOapg.properties.output_data_path + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["data_parallelism"]) -> MetaOapg.properties.data_parallelism: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_runtime_sec"]) -> MetaOapg.properties.max_runtime_sec: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_config"]) -> 'ToolConfig': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["data_parallelism"]) -> typing.Union[MetaOapg.properties.data_parallelism, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_runtime_sec"]) -> typing.Union[MetaOapg.properties.max_runtime_sec, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_config"]) -> typing.Union['ToolConfig', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> typing.Union[MetaOapg.properties.content, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["output_data_path", "model_config", "input_data_path", "labels", "data_parallelism", "max_runtime_sec", "priority", "tool_config", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "content", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + model_config: 'BatchCompletionsModelConfig', + output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], + input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, + data_parallelism: typing.Union[MetaOapg.properties.data_parallelism, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_runtime_sec: typing.Union[MetaOapg.properties.max_runtime_sec, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, + tool_config: typing.Union['ToolConfig', schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchCompletionsV2Request': + return super().__new__( + cls, + *_args, + model_config=model_config, + output_data_path=output_data_path, + input_data_path=input_data_path, + labels=labels, + data_parallelism=data_parallelism, + max_runtime_sec=max_runtime_sec, + priority=priority, + tool_config=tool_config, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + content=content, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.batch_completions_model_config import ( + BatchCompletionsModelConfig, +) +from launch.api_client.model.create_batch_completions_v1_request_content import ( + CreateBatchCompletionsV1RequestContent, +) +from launch.api_client.model.filtered_chat_completion_v2_request import ( + FilteredChatCompletionV2Request, +) +from launch.api_client.model.filtered_completion_v2_request import ( + FilteredCompletionV2Request, +) +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.tool_config import ToolConfig diff --git a/launch/api_client/model/create_batch_job_resource_requests.py b/launch/api_client/model/create_batch_job_resource_requests.py index 48c4adb1..ec6cecad 100644 --- a/launch/api_client/model/create_batch_job_resource_requests.py +++ 
b/launch/api_client/model/create_batch_job_resource_requests.py @@ -23,23 +23,31 @@ from launch.api_client import schemas # noqa: F401 -class CreateBatchJobResourceRequests(schemas.DictSchema): +class CreateBatchJobResourceRequests( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: + class properties: + + class cpus( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -55,67 +63,32 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - gpus = schemas.IntSchema - max_workers = schemas.IntSchema - + + class memory( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def 
any_of(cls): @@ -131,62 +104,56 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - per_worker = schemas.IntSchema - + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + class storage( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -202,274 +169,181 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - 
io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class max_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class concurrent_requests_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'concurrent_requests_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "cpus": cpus, - "gpu_type": gpu_type, + "memory": memory, "gpus": gpus, + "gpu_type": gpu_type, + "storage": 
storage, "max_workers": max_workers, - "memory": memory, "per_worker": per_worker, - "storage": storage, + "concurrent_requests_per_worker": concurrent_requests_per_worker, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... - + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "max_workers", - "memory", - "per_worker", - "storage", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "max_workers", "per_worker", "concurrent_requests_per_worker", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_workers"] - ) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["per_worker"] - ) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "max_workers", - "memory", - "per_worker", - "storage", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> typing.Union[MetaOapg.properties.concurrent_requests_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "max_workers", "per_worker", "concurrent_requests_per_worker", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, 
io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchJobResourceRequests": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchJobResourceRequests': return super().__new__( cls, *_args, cpus=cpus, - gpu_type=gpu_type, + memory=memory, gpus=gpus, + gpu_type=gpu_type, + storage=storage, max_workers=max_workers, - memory=memory, per_worker=per_worker, - storage=storage, + concurrent_requests_per_worker=concurrent_requests_per_worker, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/create_batch_job_resource_requests.pyi b/launch/api_client/model/create_batch_job_resource_requests.pyi deleted file mode 100644 index c3f466a3..00000000 --- 
a/launch/api_client/model/create_batch_job_resource_requests.pyi +++ /dev/null @@ -1,430 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateBatchJobResourceRequests(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - gpus = schemas.IntSchema - max_workers = schemas.IntSchema - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - per_worker = schemas.IntSchema - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "cpus": cpus, - "gpu_type": gpu_type, - "gpus": gpus, - "max_workers": max_workers, - "memory": memory, - "per_worker": per_worker, - "storage": storage, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... 
- @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "max_workers", - "memory", - "per_worker", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_workers"] - ) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["per_worker"] - ) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "max_workers", - "memory", - "per_worker", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchJobResourceRequests": - return super().__new__( - cls, - *_args, - cpus=cpus, - gpu_type=gpu_type, - gpus=gpus, - max_workers=max_workers, - memory=memory, - 
per_worker=per_worker, - storage=storage, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/create_batch_job_v1_request.py b/launch/api_client/model/create_batch_job_v1_request.py index 9232b052..82af83c1 100644 --- a/launch/api_client/model/create_batch_job_v1_request.py +++ b/launch/api_client/model/create_batch_job_v1_request.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class CreateBatchJobV1Request(schemas.DictSchema): +class CreateBatchJobV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "model_bundle_id", @@ -38,205 +41,126 @@ class MetaOapg: "input_path", "labels", } - + class properties: + model_bundle_id = schemas.StrSchema input_path = schemas.StrSchema - - class labels(schemas.DictSchema): + + @staticmethod + def serialization_format() -> typing.Type['BatchJobSerializationFormat']: + return BatchJobSerializationFormat + + + class labels( + schemas.DictSchema + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": + **kwargs: 
typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - model_bundle_id = schemas.StrSchema - + @staticmethod - def resource_requests() -> typing.Type["CreateBatchJobResourceRequests"]: + def resource_requests() -> typing.Type['CreateBatchJobResourceRequests']: return CreateBatchJobResourceRequests - - @staticmethod - def serialization_format() -> typing.Type["BatchJobSerializationFormat"]: - return BatchJobSerializationFormat - timeout_seconds = schemas.NumberSchema __annotations__ = { + "model_bundle_id": model_bundle_id, "input_path": input_path, + "serialization_format": serialization_format, "labels": labels, - "model_bundle_id": model_bundle_id, "resource_requests": resource_requests, - "serialization_format": serialization_format, "timeout_seconds": timeout_seconds, } - + model_bundle_id: MetaOapg.properties.model_bundle_id - resource_requests: "CreateBatchJobResourceRequests" - serialization_format: "BatchJobSerializationFormat" + resource_requests: 'CreateBatchJobResourceRequests' + serialization_format: 'BatchJobSerializationFormat' input_path: MetaOapg.properties.input_path labels: MetaOapg.properties.labels - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def __getitem__(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["serialization_format"]) -> 'BatchJobSerializationFormat': ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> "CreateBatchJobResourceRequests": - ... - + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["serialization_format"]) -> "BatchJobSerializationFormat": - ... - + def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateBatchJobResourceRequests': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timeout_seconds"]) -> MetaOapg.properties.timeout_seconds: - ... - + def __getitem__(self, name: typing_extensions.Literal["timeout_seconds"]) -> MetaOapg.properties.timeout_seconds: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "input_path", - "labels", - "model_bundle_id", - "resource_requests", - "serialization_format", - "timeout_seconds", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "input_path", "serialization_format", "labels", "resource_requests", "timeout_seconds", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["serialization_format"]) -> 'BatchJobSerializationFormat': ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> "CreateBatchJobResourceRequests": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["serialization_format"]) -> "BatchJobSerializationFormat": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateBatchJobResourceRequests': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["timeout_seconds"] - ) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["timeout_seconds"]) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "input_path", - "labels", - "model_bundle_id", - "resource_requests", - "serialization_format", - "timeout_seconds", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "input_path", "serialization_format", "labels", "resource_requests", "timeout_seconds", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], - resource_requests: "CreateBatchJobResourceRequests", - serialization_format: "BatchJobSerializationFormat", - input_path: typing.Union[ - MetaOapg.properties.input_path, - str, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - timeout_seconds: typing.Union[ - MetaOapg.properties.timeout_seconds, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], + resource_requests: 'CreateBatchJobResourceRequests', + serialization_format: 'BatchJobSerializationFormat', + input_path: typing.Union[MetaOapg.properties.input_path, str, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + timeout_seconds: typing.Union[MetaOapg.properties.timeout_seconds, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchJobV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchJobV1Request': return super().__new__( cls, *_args, @@ -250,7 +174,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.batch_job_serialization_format import ( 
BatchJobSerializationFormat, ) diff --git a/launch/api_client/model/create_batch_job_v1_request.pyi b/launch/api_client/model/create_batch_job_v1_request.pyi deleted file mode 100644 index a350d626..00000000 --- a/launch/api_client/model/create_batch_job_v1_request.pyi +++ /dev/null @@ -1,229 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateBatchJobV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "model_bundle_id", - "resource_requests", - "serialization_format", - "input_path", - "labels", - } - - class properties: - input_path = schemas.StrSchema - - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - model_bundle_id = schemas.StrSchema - - @staticmethod - def resource_requests() -> typing.Type["CreateBatchJobResourceRequests"]: - return CreateBatchJobResourceRequests - @staticmethod - def serialization_format() -> typing.Type["BatchJobSerializationFormat"]: - return BatchJobSerializationFormat - timeout_seconds = schemas.NumberSchema - __annotations__ = { - "input_path": input_path, - "labels": labels, - "model_bundle_id": model_bundle_id, - "resource_requests": resource_requests, - "serialization_format": serialization_format, - "timeout_seconds": timeout_seconds, - } - model_bundle_id: MetaOapg.properties.model_bundle_id - resource_requests: "CreateBatchJobResourceRequests" - serialization_format: "BatchJobSerializationFormat" - input_path: MetaOapg.properties.input_path - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> "CreateBatchJobResourceRequests": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["serialization_format"]) -> "BatchJobSerializationFormat": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["timeout_seconds"] - ) -> MetaOapg.properties.timeout_seconds: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "input_path", - "labels", - "model_bundle_id", - "resource_requests", - "serialization_format", - "timeout_seconds", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["input_path"]) -> MetaOapg.properties.input_path: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_requests"] - ) -> "CreateBatchJobResourceRequests": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["serialization_format"] - ) -> "BatchJobSerializationFormat": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["timeout_seconds"] - ) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "input_path", - "labels", - "model_bundle_id", - "resource_requests", - "serialization_format", - "timeout_seconds", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], - resource_requests: "CreateBatchJobResourceRequests", - serialization_format: "BatchJobSerializationFormat", - input_path: typing.Union[ - MetaOapg.properties.input_path, - str, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - timeout_seconds: typing.Union[ - MetaOapg.properties.timeout_seconds, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchJobV1Request": - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - resource_requests=resource_requests, - serialization_format=serialization_format, - input_path=input_path, - labels=labels, - timeout_seconds=timeout_seconds, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.batch_job_serialization_format import ( - BatchJobSerializationFormat, -) -from launch_client.model.create_batch_job_resource_requests import ( - CreateBatchJobResourceRequests, -) diff --git a/launch/api_client/model/create_batch_job_v1_response.py b/launch/api_client/model/create_batch_job_v1_response.py index 9aebd3da..59f232e2 100644 --- a/launch/api_client/model/create_batch_job_v1_response.py +++ 
b/launch/api_client/model/create_batch_job_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateBatchJobV1Response(schemas.DictSchema): +class CreateBatchJobV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "job_id", } - + class properties: job_id = schemas.StrSchema __annotations__ = { "job_id": job_id, } - + job_id: MetaOapg.properties.job_id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - job_id: typing.Union[ - MetaOapg.properties.job_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + job_id: typing.Union[MetaOapg.properties.job_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateBatchJobV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_chat_completion_response.py b/launch/api_client/model/create_chat_completion_response.py new file mode 100644 index 00000000..fdbcbf28 --- /dev/null +++ b/launch/api_client/model/create_chat_completion_response.py @@ -0,0 +1,230 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateChatCompletionResponse( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "created", + "model", + "id", + "choices", + "object", + } + + class properties: + id = schemas.StrSchema + + + class choices( + schemas.ListSchema + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['Choice']: + return Choice + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['Choice'], typing.List['Choice']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'choices': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'Choice': + return super().__getitem__(i) + created = schemas.IntSchema + model = schemas.StrSchema + + + class object( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "chat.completion": "CHAT_COMPLETION", + } + + @schemas.classproperty + def CHAT_COMPLETION(cls): + return cls("chat.completion") + + @staticmethod + def service_tier() -> typing.Type['ServiceTier']: + return ServiceTier + + + class system_fingerprint( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'system_fingerprint': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def usage() -> typing.Type['CompletionUsage']: + return CompletionUsage + __annotations__ = { + "id": id, + "choices": choices, + "created": created, + "model": model, + "object": object, + "service_tier": service_tier, + "system_fingerprint": system_fingerprint, + "usage": usage, + } + + created: MetaOapg.properties.created + model: MetaOapg.properties.model + id: MetaOapg.properties.id + choices: MetaOapg.properties.choices + object: MetaOapg.properties.object + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["system_fingerprint"]) -> MetaOapg.properties.system_fingerprint: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["usage"]) -> 'CompletionUsage': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["system_fingerprint"]) -> typing.Union[MetaOapg.properties.system_fingerprint, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["usage"]) -> typing.Union['CompletionUsage', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + created: typing.Union[MetaOapg.properties.created, decimal.Decimal, int, ], + model: typing.Union[MetaOapg.properties.model, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + choices: typing.Union[MetaOapg.properties.choices, list, tuple, ], + object: typing.Union[MetaOapg.properties.object, str, ], + service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, + system_fingerprint: typing.Union[MetaOapg.properties.system_fingerprint, None, str, schemas.Unset] = schemas.unset, + usage: typing.Union['CompletionUsage', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateChatCompletionResponse': + return super().__new__( + cls, + *_args, + created=created, + model=model, + id=id, + choices=choices, + object=object, + service_tier=service_tier, + system_fingerprint=system_fingerprint, + usage=usage, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.choice import Choice +from launch.api_client.model.completion_usage import CompletionUsage +from 
launch.api_client.model.service_tier import ServiceTier diff --git a/launch/api_client/model/create_chat_completion_stream_response.py b/launch/api_client/model/create_chat_completion_stream_response.py new file mode 100644 index 00000000..b0b83707 --- /dev/null +++ b/launch/api_client/model/create_chat_completion_stream_response.py @@ -0,0 +1,230 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateChatCompletionStreamResponse( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "created", + "model", + "id", + "choices", + "object", + } + + class properties: + id = schemas.StrSchema + + + class choices( + schemas.ListSchema + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['Choice1']: + return Choice1 + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['Choice1'], typing.List['Choice1']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'choices': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'Choice1': + return super().__getitem__(i) + created = schemas.IntSchema + model = schemas.StrSchema + + + class object( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "chat.completion.chunk": "CHAT_COMPLETION_CHUNK", + } + + @schemas.classproperty + def CHAT_COMPLETION_CHUNK(cls): + return cls("chat.completion.chunk") + + @staticmethod + def service_tier() -> typing.Type['ServiceTier']: + return ServiceTier + + + class system_fingerprint( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'system_fingerprint': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def usage() -> typing.Type['CompletionUsage']: + return CompletionUsage + __annotations__ = { + "id": id, + "choices": choices, + "created": created, + "model": model, + "object": object, + "service_tier": service_tier, + "system_fingerprint": system_fingerprint, + "usage": usage, + } + + created: MetaOapg.properties.created + model: MetaOapg.properties.model + id: MetaOapg.properties.id + choices: MetaOapg.properties.choices + object: MetaOapg.properties.object + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: 
... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["system_fingerprint"]) -> MetaOapg.properties.system_fingerprint: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["usage"]) -> 'CompletionUsage': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["system_fingerprint"]) -> typing.Union[MetaOapg.properties.system_fingerprint, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["usage"]) -> typing.Union['CompletionUsage', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "service_tier", "system_fingerprint", "usage", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + created: typing.Union[MetaOapg.properties.created, decimal.Decimal, int, ], + model: typing.Union[MetaOapg.properties.model, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + choices: typing.Union[MetaOapg.properties.choices, list, tuple, ], + object: typing.Union[MetaOapg.properties.object, str, ], + service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, + system_fingerprint: typing.Union[MetaOapg.properties.system_fingerprint, None, str, schemas.Unset] = schemas.unset, + usage: typing.Union['CompletionUsage', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateChatCompletionStreamResponse': + return super().__new__( + cls, + *_args, + created=created, + model=model, + id=id, + choices=choices, + object=object, + service_tier=service_tier, + system_fingerprint=system_fingerprint, + usage=usage, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.choice1 import Choice1 +from launch.api_client.model.completion_usage import CompletionUsage +from 
launch.api_client.model.service_tier import ServiceTier diff --git a/launch/api_client/model/create_completion_response.py b/launch/api_client/model/create_completion_response.py new file mode 100644 index 00000000..963b10b1 --- /dev/null +++ b/launch/api_client/model/create_completion_response.py @@ -0,0 +1,216 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateCompletionResponse( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "created", + "model", + "id", + "choices", + "object", + } + + class properties: + id = schemas.StrSchema + + + class choices( + schemas.ListSchema + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['Choice2']: + return Choice2 + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['Choice2'], typing.List['Choice2']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'choices': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'Choice2': + return super().__getitem__(i) + created = schemas.IntSchema + model = schemas.StrSchema + + + class object( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "text_completion": "TEXT_COMPLETION", + } + + @schemas.classproperty + def TEXT_COMPLETION(cls): + return cls("text_completion") + + + class system_fingerprint( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'system_fingerprint': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def usage() -> typing.Type['CompletionUsage']: + return CompletionUsage + __annotations__ = { + "id": id, + "choices": choices, + "created": created, + "model": model, + "object": object, + "system_fingerprint": system_fingerprint, + "usage": usage, + } + + created: MetaOapg.properties.created + model: MetaOapg.properties.model + id: MetaOapg.properties.id + choices: MetaOapg.properties.choices + object: MetaOapg.properties.object + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["system_fingerprint"]) -> MetaOapg.properties.system_fingerprint: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["usage"]) -> 'CompletionUsage': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "system_fingerprint", "usage", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["choices"]) -> MetaOapg.properties.choices: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["created"]) -> MetaOapg.properties.created: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["object"]) -> MetaOapg.properties.object: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["system_fingerprint"]) -> typing.Union[MetaOapg.properties.system_fingerprint, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["usage"]) -> typing.Union['CompletionUsage', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "choices", "created", "model", "object", "system_fingerprint", "usage", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + created: typing.Union[MetaOapg.properties.created, decimal.Decimal, int, ], + model: typing.Union[MetaOapg.properties.model, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + choices: typing.Union[MetaOapg.properties.choices, list, tuple, ], + object: typing.Union[MetaOapg.properties.object, str, ], + system_fingerprint: typing.Union[MetaOapg.properties.system_fingerprint, None, str, schemas.Unset] = schemas.unset, + usage: typing.Union['CompletionUsage', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateCompletionResponse': + return super().__new__( + cls, + *_args, + created=created, + model=model, + id=id, + choices=choices, + object=object, + system_fingerprint=system_fingerprint, + usage=usage, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.choice2 import Choice2 +from launch.api_client.model.completion_usage import CompletionUsage diff --git a/launch/api_client/model/create_deep_speed_model_endpoint_request.py b/launch/api_client/model/create_deep_speed_model_endpoint_request.py new file mode 100644 index 00000000..a6aa457b --- /dev/null +++ b/launch/api_client/model/create_deep_speed_model_endpoint_request.py @@ -0,0 +1,842 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version 
of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateDeepSpeedModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "metadata", + "model_name", + "max_workers", + "min_workers", + "name", + "per_worker", + "labels", + } + + class properties: + name = schemas.StrSchema + model_name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + min_workers = schemas.IntSchema + max_workers = schemas.IntSchema + per_worker = schemas.IntSchema + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) 
-> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + inference_framework_image_tag = schemas.StrSchema + num_shards = schemas.IntSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "deepspeed": "DEEPSPEED", + } + + @schemas.classproperty + def DEEPSPEED(cls): + return cls("deepspeed") + __annotations__ = { + "name": name, + "model_name": model_name, + "metadata": metadata, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": 
chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "source": source, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "endpoint_type": endpoint_type, + "inference_framework": inference_framework, + } + + metadata: MetaOapg.properties.metadata + model_name: MetaOapg.properties.model_name + max_workers: MetaOapg.properties.max_workers + min_workers: MetaOapg.properties.min_workers + name: MetaOapg.properties.name + per_worker: MetaOapg.properties.per_worker + labels: MetaOapg.properties.labels + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = 
schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: 
typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, + endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateDeepSpeedModelEndpointRequest': + return super().__new__( + cls, + *_args, + metadata=metadata, + model_name=model_name, + max_workers=max_workers, + min_workers=min_workers, + name=name, + per_worker=per_worker, + labels=labels, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + source=source, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + endpoint_type=endpoint_type, + inference_framework=inference_framework, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType 
+from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py index 392a4a02..3cbb634f 100644 --- a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py +++ b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class CreateDockerImageBatchJobBundleV1Request(schemas.DictSchema): +class CreateDockerImageBatchJobBundleV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "image_repository", @@ -37,346 +40,200 @@ class MetaOapg: "image_tag", "command", } - + class properties: - class command(schemas.ListSchema): + name = schemas.StrSchema + image_repository = schemas.StrSchema + image_tag = schemas.StrSchema + + + class command( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": + ) -> 'command': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - name = schemas.StrSchema - - class env(schemas.DictSchema): + + + class env( + schemas.DictSchema + ): + 
+ class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'env': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - mount_location = schemas.StrSchema - public = schemas.BoolSchema - - class resource_requests( - schemas.ComposedSchema, + + + class mount_location( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateDockerImageBatchJobResourceRequests, - ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[None, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "resource_requests": + ) -> 'mount_location': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def resource_requests() -> typing.Type['CreateDockerImageBatchJobResourceRequests']: + return CreateDockerImageBatchJobResourceRequests + + + class public( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public': return super().__new__( cls, *_args, _configuration=_configuration, - **kwargs, ) - __annotations__ = { - "command": command, + "name": name, "image_repository": image_repository, "image_tag": image_tag, - "name": name, + "command": command, "env": env, "mount_location": mount_location, - "public": public, "resource_requests": resource_requests, + "public": public, } - + image_repository: MetaOapg.properties.image_repository name: MetaOapg.properties.name image_tag: MetaOapg.properties.image_tag command: MetaOapg.properties.command - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) 
-> MetaOapg.properties.command: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: - ... - + def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: - ... - + def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: - ... - + def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateDockerImageBatchJobResourceRequests': ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["resource_requests"] - ) -> MetaOapg.properties.resource_requests: - ... - + def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "image_repository", - "image_tag", - "name", - "env", - "mount_location", - "public", - "resource_requests", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "image_repository", "image_tag", "command", "env", "mount_location", "resource_requests", "public", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["mount_location"] - ) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["mount_location"]) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public"] - ) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_requests"] - ) -> typing.Union[MetaOapg.properties.resource_requests, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["public"]) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "image_repository", - "image_tag", - "name", - "env", - "mount_location", - "public", - "resource_requests", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "image_repository", "image_tag", "command", "env", "mount_location", "resource_requests", "public", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - image_repository: typing.Union[ - MetaOapg.properties.image_repository, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - image_tag: typing.Union[ - MetaOapg.properties.image_tag, - str, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + image_repository: typing.Union[MetaOapg.properties.image_repository, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + image_tag: typing.Union[MetaOapg.properties.image_tag, str, ], + command: typing.Union[MetaOapg.properties.command, list, tuple, ], env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - mount_location: typing.Union[MetaOapg.properties.mount_location, str, schemas.Unset] = schemas.unset, - public: typing.Union[MetaOapg.properties.public, bool, schemas.Unset] = schemas.unset, - resource_requests: typing.Union[ - MetaOapg.properties.resource_requests, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + mount_location: typing.Union[MetaOapg.properties.mount_location, None, str, schemas.Unset] = schemas.unset, + resource_requests: typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset] = schemas.unset, + public: typing.Union[MetaOapg.properties.public, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - 
date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobBundleV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateDockerImageBatchJobBundleV1Request': return super().__new__( cls, *_args, @@ -386,13 +243,12 @@ def __new__( command=command, env=env, mount_location=mount_location, - public=public, resource_requests=resource_requests, + public=public, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( CreateDockerImageBatchJobResourceRequests, ) diff --git a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.pyi b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.pyi deleted file mode 100644 index 66f3656f..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_request.pyi +++ /dev/null @@ -1,349 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateDockerImageBatchJobBundleV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "image_repository", - "name", - "image_tag", - "command", - } - - class properties: - class command(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - name = schemas.StrSchema - - class env(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - mount_location = schemas.StrSchema - public = schemas.BoolSchema - - class resource_requests( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateDockerImageBatchJobResourceRequests, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "resource_requests": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "command": command, - "image_repository": image_repository, - "image_tag": image_tag, - "name": name, - "env": env, - "mount_location": mount_location, - "public": public, - "resource_requests": resource_requests, - } - image_repository: MetaOapg.properties.image_repository - name: MetaOapg.properties.name - image_tag: MetaOapg.properties.image_tag - command: MetaOapg.properties.command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["resource_requests"] - ) -> MetaOapg.properties.resource_requests: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "image_repository", - "image_tag", - "name", - "env", - "mount_location", - "public", - "resource_requests", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["mount_location"] - ) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public"] - ) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_requests"] - ) -> typing.Union[MetaOapg.properties.resource_requests, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "image_repository", - "image_tag", - "name", - "env", - "mount_location", - "public", - "resource_requests", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - image_repository: typing.Union[ - MetaOapg.properties.image_repository, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - image_tag: typing.Union[ - MetaOapg.properties.image_tag, - str, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - mount_location: typing.Union[MetaOapg.properties.mount_location, str, schemas.Unset] = schemas.unset, - public: typing.Union[MetaOapg.properties.public, bool, schemas.Unset] = schemas.unset, - resource_requests: typing.Union[ - MetaOapg.properties.resource_requests, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobBundleV1Request": - return super().__new__( - cls, - *_args, - image_repository=image_repository, - name=name, - image_tag=image_tag, - command=command, - env=env, - mount_location=mount_location, - public=public, - resource_requests=resource_requests, - _configuration=_configuration, - **kwargs, - ) - -from 
launch_client.model.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) diff --git a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py index 96572cec..03394c6a 100644 --- a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py +++ b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.py @@ -23,93 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateDockerImageBatchJobBundleV1Response(schemas.DictSchema): +class CreateDockerImageBatchJobBundleV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "docker_image_batch_job_bundle_id", } - + class properties: docker_image_batch_job_bundle_id = schemas.StrSchema __annotations__ = { "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, } - + docker_image_batch_job_bundle_id: MetaOapg.properties.docker_image_batch_job_bundle_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundle_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundle_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundle_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundle_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - docker_image_batch_job_bundle_id: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + docker_image_batch_job_bundle_id: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobBundleV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateDockerImageBatchJobBundleV1Response': return super().__new__( cls, *_args, diff --git 
a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.pyi b/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.pyi deleted file mode 100644 index 04eb96f5..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_bundle_v1_response.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateDockerImageBatchJobBundleV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "docker_image_batch_job_bundle_id", - } - - class properties: - docker_image_batch_job_bundle_id = schemas.StrSchema - __annotations__ = { - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, - } - docker_image_batch_job_bundle_id: MetaOapg.properties.docker_image_batch_job_bundle_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundle_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundle_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - docker_image_batch_job_bundle_id: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobBundleV1Response": - return super().__new__( - cls, - *_args, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_docker_image_batch_job_resource_requests.py b/launch/api_client/model/create_docker_image_batch_job_resource_requests.py index 7acafdf4..a31aef74 100644 --- a/launch/api_client/model/create_docker_image_batch_job_resource_requests.py +++ b/launch/api_client/model/create_docker_image_batch_job_resource_requests.py @@ -23,23 +23,31 @@ from launch.api_client import schemas # noqa: F401 -class CreateDockerImageBatchJobResourceRequests(schemas.DictSchema): +class CreateDockerImageBatchJobResourceRequests( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: + class properties: + + class cpus( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -55,66 +63,32 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - gpus = schemas.IntSchema - + + class memory( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -130,60 +104,56 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + class storage( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -199,244 +169,123 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - 
uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "cpus": cpus, - "gpu_type": gpu_type, - "gpus": gpus, "memory": memory, + "gpus": gpus, + "gpu_type": gpu_type, "storage": storage, + "nodes_per_worker": nodes_per_worker, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... - + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "memory", - "storage", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "nodes_per_worker", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "memory", - "storage", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cpus", "memory", "gpus", "gpu_type", "storage", "nodes_per_worker", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - 
decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobResourceRequests": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateDockerImageBatchJobResourceRequests': return super().__new__( cls, *_args, cpus=cpus, - gpu_type=gpu_type, - gpus=gpus, memory=memory, + gpus=gpus, + gpu_type=gpu_type, storage=storage, + nodes_per_worker=nodes_per_worker, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.gpu_type import 
GpuType diff --git a/launch/api_client/model/create_docker_image_batch_job_resource_requests.pyi b/launch/api_client/model/create_docker_image_batch_job_resource_requests.pyi deleted file mode 100644 index ff616d37..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_resource_requests.pyi +++ /dev/null @@ -1,406 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateDockerImageBatchJobResourceRequests(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - gpus = schemas.IntSchema - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "cpus": cpus, - "gpu_type": gpu_type, - "gpus": gpus, - "memory": memory, - "storage": storage, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "memory", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpu_type", - "gpus", - "memory", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobResourceRequests": - return super().__new__( - cls, - *_args, - cpus=cpus, - gpu_type=gpu_type, - gpus=gpus, - memory=memory, - storage=storage, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/create_docker_image_batch_job_v1_request.py b/launch/api_client/model/create_docker_image_batch_job_v1_request.py index fb3f0833..58029c8e 
100644 --- a/launch/api_client/model/create_docker_image_batch_job_v1_request.py +++ b/launch/api_client/model/create_docker_image_batch_job_v1_request.py @@ -23,316 +23,235 @@ from launch.api_client import schemas # noqa: F401 -class CreateDockerImageBatchJobV1Request(schemas.DictSchema): +class CreateDockerImageBatchJobV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "labels", } - + class properties: - class labels(schemas.DictSchema): + + + class labels( + schemas.DictSchema + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - docker_image_batch_job_bundle_id = schemas.StrSchema - docker_image_batch_job_bundle_name = schemas.StrSchema - job_config = schemas.DictSchema - override_job_max_runtime_s = schemas.IntSchema - - class resource_requests( - schemas.ComposedSchema, + + + class docker_image_batch_job_bundle_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'docker_image_batch_job_bundle_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class docker_image_batch_job_bundle_id( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'docker_image_batch_job_bundle_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class job_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateDockerImageBatchJobResourceRequests, - ] - + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "resource_requests": + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'job_config': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + @staticmethod + def resource_requests() -> typing.Type['CreateDockerImageBatchJobResourceRequests']: + return CreateDockerImageBatchJobResourceRequests + + + class override_job_max_runtime_s( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'override_job_max_runtime_s': + 
return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "labels": labels, - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, "docker_image_batch_job_bundle_name": docker_image_batch_job_bundle_name, + "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, "job_config": job_config, - "override_job_max_runtime_s": override_job_max_runtime_s, "resource_requests": resource_requests, + "override_job_max_runtime_s": override_job_max_runtime_s, } - + labels: MetaOapg.properties.labels - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"]) -> MetaOapg.properties.docker_image_batch_job_bundle_name: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_name: - ... - + def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_config"]) -> MetaOapg.properties.job_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["job_config"]) -> MetaOapg.properties.job_config: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> MetaOapg.properties.override_job_max_runtime_s: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["resource_requests"]) -> 'CreateDockerImageBatchJobResourceRequests': ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["resource_requests"] - ) -> MetaOapg.properties.resource_requests: - ... - + def __getitem__(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> MetaOapg.properties.override_job_max_runtime_s: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "docker_image_batch_job_bundle_id", - "docker_image_batch_job_bundle_name", - "job_config", - "override_job_max_runtime_s", - "resource_requests", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["labels", "docker_image_batch_job_bundle_name", "docker_image_batch_job_bundle_id", "job_config", "resource_requests", "override_job_max_runtime_s", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"]) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_name, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"] - ) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_name, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["job_config"] - ) -> typing.Union[MetaOapg.properties.job_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["job_config"]) -> typing.Union[MetaOapg.properties.job_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["resource_requests"]) -> typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_requests"] - ) -> typing.Union[MetaOapg.properties.resource_requests, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "docker_image_batch_job_bundle_id", - "docker_image_batch_job_bundle_name", - "job_config", - "override_job_max_runtime_s", - "resource_requests", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["labels", "docker_image_batch_job_bundle_name", "docker_image_batch_job_bundle_id", "job_config", "resource_requests", "override_job_max_runtime_s", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - docker_image_batch_job_bundle_id: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_id, str, schemas.Unset - ] = schemas.unset, - docker_image_batch_job_bundle_name: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_name, str, schemas.Unset - ] = schemas.unset, - job_config: typing.Union[ - MetaOapg.properties.job_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - override_job_max_runtime_s: typing.Union[ - MetaOapg.properties.override_job_max_runtime_s, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - resource_requests: typing.Union[ - MetaOapg.properties.resource_requests, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + docker_image_batch_job_bundle_name: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_name, None, str, schemas.Unset] = schemas.unset, + docker_image_batch_job_bundle_id: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, None, str, schemas.Unset] = schemas.unset, + job_config: typing.Union[MetaOapg.properties.job_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + resource_requests: typing.Union['CreateDockerImageBatchJobResourceRequests', schemas.Unset] = schemas.unset, + 
override_job_max_runtime_s: typing.Union[MetaOapg.properties.override_job_max_runtime_s, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateDockerImageBatchJobV1Request': return super().__new__( cls, *_args, labels=labels, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, docker_image_batch_job_bundle_name=docker_image_batch_job_bundle_name, + docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, job_config=job_config, - override_job_max_runtime_s=override_job_max_runtime_s, resource_requests=resource_requests, + override_job_max_runtime_s=override_job_max_runtime_s, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( CreateDockerImageBatchJobResourceRequests, ) diff --git a/launch/api_client/model/create_docker_image_batch_job_v1_request.pyi b/launch/api_client/model/create_docker_image_batch_job_v1_request.pyi deleted file mode 100644 index 5d20a3a8..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_v1_request.pyi +++ /dev/null @@ -1,298 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # 
noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateDockerImageBatchJobV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "labels", - } - - class properties: - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - docker_image_batch_job_bundle_id = schemas.StrSchema - docker_image_batch_job_bundle_name = schemas.StrSchema - job_config = schemas.DictSchema - override_job_max_runtime_s = schemas.IntSchema - - class resource_requests( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CreateDockerImageBatchJobResourceRequests, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "resource_requests": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "labels": labels, - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, - "docker_image_batch_job_bundle_name": docker_image_batch_job_bundle_name, - "job_config": job_config, - "override_job_max_runtime_s": override_job_max_runtime_s, - "resource_requests": resource_requests, - } - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_config"]) -> MetaOapg.properties.job_config: ... 
- @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> MetaOapg.properties.override_job_max_runtime_s: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["resource_requests"] - ) -> MetaOapg.properties.resource_requests: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "docker_image_batch_job_bundle_id", - "docker_image_batch_job_bundle_name", - "job_config", - "override_job_max_runtime_s", - "resource_requests", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_name"] - ) -> typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_name, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["job_config"] - ) -> typing.Union[MetaOapg.properties.job_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_requests"] - ) -> typing.Union[MetaOapg.properties.resource_requests, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "docker_image_batch_job_bundle_id", - "docker_image_batch_job_bundle_name", - "job_config", - "override_job_max_runtime_s", - "resource_requests", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - docker_image_batch_job_bundle_id: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_id, str, schemas.Unset - ] = schemas.unset, - docker_image_batch_job_bundle_name: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_name, str, schemas.Unset - ] = schemas.unset, - job_config: typing.Union[ - MetaOapg.properties.job_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - override_job_max_runtime_s: typing.Union[ - MetaOapg.properties.override_job_max_runtime_s, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - resource_requests: typing.Union[ - MetaOapg.properties.resource_requests, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobV1Request": - return super().__new__( - cls, - *_args, - labels=labels, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - docker_image_batch_job_bundle_name=docker_image_batch_job_bundle_name, - job_config=job_config, - override_job_max_runtime_s=override_job_max_runtime_s, - resource_requests=resource_requests, - 
_configuration=_configuration, - **kwargs, - ) - -from launch_client.model.create_docker_image_batch_job_resource_requests import ( - CreateDockerImageBatchJobResourceRequests, -) diff --git a/launch/api_client/model/create_docker_image_batch_job_v1_response.py b/launch/api_client/model/create_docker_image_batch_job_v1_response.py index 19555a30..f90e3731 100644 --- a/launch/api_client/model/create_docker_image_batch_job_v1_response.py +++ b/launch/api_client/model/create_docker_image_batch_job_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateDockerImageBatchJobV1Response(schemas.DictSchema): +class CreateDockerImageBatchJobV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "job_id", } - + class properties: job_id = schemas.StrSchema __annotations__ = { "job_id": job_id, } - + job_id: MetaOapg.properties.job_id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... 
+ @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - job_id: typing.Union[ - MetaOapg.properties.job_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + job_id: typing.Union[MetaOapg.properties.job_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateDockerImageBatchJobV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_docker_image_batch_job_v1_response.pyi b/launch/api_client/model/create_docker_image_batch_job_v1_response.pyi deleted file mode 100644 index 08f87524..00000000 --- a/launch/api_client/model/create_docker_image_batch_job_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # 
noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateDockerImageBatchJobV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "job_id", - } - - class properties: - job_id = schemas.StrSchema - __annotations__ = { - "job_id": job_id, - } - job_id: MetaOapg.properties.job_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - job_id: typing.Union[ - MetaOapg.properties.job_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateDockerImageBatchJobV1Response": - return super().__new__( - cls, - *_args, - job_id=job_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_job_request.py b/launch/api_client/model/create_fine_tune_job_request.py index eb4dd736..ec449348 100644 --- a/launch/api_client/model/create_fine_tune_job_request.py +++ b/launch/api_client/model/create_fine_tune_job_request.py @@ -44,14 +44,18 @@ class MetaOapg: def __getitem__( self, - name: typing.Union[str,], + name: typing.Union[ + str, + ], ) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) def get_item_oapg( self, - name: typing.Union[str,], + name: typing.Union[ + str, + ], ) -> MetaOapg.additional_properties: return super().get_item_oapg(name) diff --git a/launch/api_client/model/create_fine_tune_job_request.pyi b/launch/api_client/model/create_fine_tune_job_request.pyi deleted file mode 100644 index 0be4c928..00000000 --- a/launch/api_client/model/create_fine_tune_job_request.pyi +++ /dev/null @@ -1,198 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 
-import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateFineTuneRequest(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "training_file", - "hyperparameters", - "model", - } - - class properties: - class hyperparameters(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "hyperparameters": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - model = schemas.StrSchema - training_file = schemas.StrSchema - suffix = schemas.StrSchema - validation_file = schemas.StrSchema - __annotations__ = { - "hyperparameters": hyperparameters, - "model": model, - "training_file": training_file, - "suffix": suffix, - "validation_file": validation_file, - } - training_file: MetaOapg.properties.training_file - hyperparameters: MetaOapg.properties.hyperparameters - model: MetaOapg.properties.model - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["hyperparameters"] - ) -> MetaOapg.properties.hyperparameters: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["validation_file"] - ) -> MetaOapg.properties.validation_file: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["hyperparameters"] - ) -> MetaOapg.properties.hyperparameters: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["suffix"] - ) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["validation_file"] - ) -> typing.Union[MetaOapg.properties.validation_file, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - training_file: typing.Union[ - MetaOapg.properties.training_file, - str, - ], - hyperparameters: typing.Union[ - MetaOapg.properties.hyperparameters, - dict, - frozendict.frozendict, - ], - model: typing.Union[ - MetaOapg.properties.model, - str, - ], - suffix: typing.Union[MetaOapg.properties.suffix, str, schemas.Unset] = schemas.unset, - validation_file: typing.Union[MetaOapg.properties.validation_file, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneRequest": - return super().__new__( - cls, - *_args, - training_file=training_file, - hyperparameters=hyperparameters, - model=model, - suffix=suffix, - validation_file=validation_file, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_job_response.py b/launch/api_client/model/create_fine_tune_job_response.py index c59ec4b9..3a1d3224 100644 --- a/launch/api_client/model/create_fine_tune_job_response.py +++ b/launch/api_client/model/create_fine_tune_job_response.py @@ -54,7 +54,9 @@ def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: def __getitem__( self, name: typing.Union[ - typing_extensions.Literal["fine_tune_id",], + typing_extensions.Literal[ + "fine_tune_id", + ], str, ], ): @@ -72,7 +74,9 @@ def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, s def get_item_oapg( self, name: typing.Union[ - typing_extensions.Literal["fine_tune_id",], + 
typing_extensions.Literal[ + "fine_tune_id", + ], str, ], ): diff --git a/launch/api_client/model/create_fine_tune_job_response.pyi b/launch/api_client/model/create_fine_tune_job_response.pyi deleted file mode 100644 index bf7034f4..00000000 --- a/launch/api_client/model/create_fine_tune_job_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "fine_tune_id", - } - - class properties: - fine_tune_id = schemas.StrSchema - __annotations__ = { - "fine_tune_id": fine_tune_id, - } - fine_tune_id: MetaOapg.properties.fine_tune_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["fine_tune_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["fine_tune_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - fine_tune_id: typing.Union[ - MetaOapg.properties.fine_tune_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneResponse": - return super().__new__( - cls, - *_args, - fine_tune_id=fine_tune_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_request.py b/launch/api_client/model/create_fine_tune_request.py index 2bc359fc..0672053c 100644 --- a/launch/api_client/model/create_fine_tune_request.py +++ b/launch/api_client/model/create_fine_tune_request.py @@ -23,32 +23,75 @@ from launch.api_client import schemas # noqa: F401 -class CreateFineTuneRequest(schemas.DictSchema): +class CreateFineTuneRequest( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "training_file", "hyperparameters", "model", } - + class properties: - class hyperparameters(schemas.DictSchema): + model = schemas.StrSchema + training_file = schemas.StrSchema + + + class hyperparameters( + schemas.DictSchema + ): + + class MetaOapg: + + class additional_properties( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - any_of_3 = schemas.DictSchema - + + + class any_of_3( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'any_of_3': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + @classmethod @functools.lru_cache() def any_of(cls): @@ -65,262 +108,197 @@ def any_of(cls): cls.any_of_2, cls.any_of_3, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - 
schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "additional_properties": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'additional_properties': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - ) -> "hyperparameters": + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'hyperparameters': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class validation_file( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, 
+ ) -> 'validation_file': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class suffix( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'suffix': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class wandb_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'wandb_config': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - model = schemas.StrSchema - training_file = schemas.StrSchema - suffix = schemas.StrSchema - validation_file = schemas.StrSchema - wandb_config = schemas.DictSchema __annotations__ = { - "hyperparameters": hyperparameters, "model": model, "training_file": training_file, - "suffix": suffix, + "hyperparameters": hyperparameters, "validation_file": validation_file, + "suffix": suffix, "wandb_config": wandb_config, } - + training_file: MetaOapg.properties.training_file hyperparameters: MetaOapg.properties.hyperparameters model: MetaOapg.properties.model - + @typing.overload - def __getitem__(self, name: 
typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: - ... - + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: - ... - + def __getitem__(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: - ... - + def __getitem__(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: - ... - + def __getitem__(self, name: typing_extensions.Literal["validation_file"]) -> MetaOapg.properties.validation_file: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["validation_file"]) -> MetaOapg.properties.validation_file: - ... - + def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["wandb_config"]) -> MetaOapg.properties.wandb_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["wandb_config"]) -> MetaOapg.properties.wandb_config: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - "wandb_config", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model", "training_file", "hyperparameters", "validation_file", "suffix", "wandb_config", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["hyperparameters"]) -> MetaOapg.properties.hyperparameters: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["suffix"] - ) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["validation_file"]) -> typing.Union[MetaOapg.properties.validation_file, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["validation_file"] - ) -> typing.Union[MetaOapg.properties.validation_file, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["suffix"]) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["wandb_config"] - ) -> typing.Union[MetaOapg.properties.wandb_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["wandb_config"]) -> typing.Union[MetaOapg.properties.wandb_config, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - "wandb_config", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model", "training_file", "hyperparameters", "validation_file", "suffix", "wandb_config", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - training_file: typing.Union[ - MetaOapg.properties.training_file, - str, - ], - hyperparameters: typing.Union[ - MetaOapg.properties.hyperparameters, - dict, - frozendict.frozendict, - ], - model: typing.Union[ - MetaOapg.properties.model, - str, - ], - suffix: typing.Union[MetaOapg.properties.suffix, str, schemas.Unset] = schemas.unset, - validation_file: typing.Union[MetaOapg.properties.validation_file, str, schemas.Unset] = schemas.unset, - wandb_config: typing.Union[ - MetaOapg.properties.wandb_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + training_file: typing.Union[MetaOapg.properties.training_file, str, ], + hyperparameters: typing.Union[MetaOapg.properties.hyperparameters, dict, frozendict.frozendict, ], + model: typing.Union[MetaOapg.properties.model, str, ], + validation_file: typing.Union[MetaOapg.properties.validation_file, None, str, schemas.Unset] = schemas.unset, + suffix: typing.Union[MetaOapg.properties.suffix, None, str, schemas.Unset] = schemas.unset, + wandb_config: typing.Union[MetaOapg.properties.wandb_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: 
typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneRequest": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateFineTuneRequest': return super().__new__( cls, *_args, training_file=training_file, hyperparameters=hyperparameters, model=model, - suffix=suffix, validation_file=validation_file, + suffix=suffix, wandb_config=wandb_config, _configuration=_configuration, **kwargs, diff --git a/launch/api_client/model/create_fine_tune_request.pyi b/launch/api_client/model/create_fine_tune_request.pyi deleted file mode 100644 index 24b9815d..00000000 --- a/launch/api_client/model/create_fine_tune_request.pyi +++ /dev/null @@ -1,295 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateFineTuneRequest(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "training_file", - "hyperparameters", - "model", - } - - class properties: - class hyperparameters(schemas.DictSchema): - class MetaOapg: - class additional_properties( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - any_of_3 = schemas.DictSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - cls.any_of_3, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "additional_properties": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: 
typing.Union[ - MetaOapg.additional_properties, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - ) -> "hyperparameters": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - model = schemas.StrSchema - training_file = schemas.StrSchema - suffix = schemas.StrSchema - validation_file = schemas.StrSchema - wandb_config = schemas.DictSchema - __annotations__ = { - "hyperparameters": hyperparameters, - "model": model, - "training_file": training_file, - "suffix": suffix, - "validation_file": validation_file, - "wandb_config": wandb_config, - } - training_file: MetaOapg.properties.training_file - hyperparameters: MetaOapg.properties.hyperparameters - model: MetaOapg.properties.model - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["hyperparameters"] - ) -> MetaOapg.properties.hyperparameters: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["validation_file"] - ) -> MetaOapg.properties.validation_file: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["wandb_config"]) -> MetaOapg.properties.wandb_config: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - "wandb_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["hyperparameters"] - ) -> MetaOapg.properties.hyperparameters: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["training_file"]) -> MetaOapg.properties.training_file: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["suffix"] - ) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["validation_file"] - ) -> typing.Union[MetaOapg.properties.validation_file, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["wandb_config"] - ) -> typing.Union[MetaOapg.properties.wandb_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "hyperparameters", - "model", - "training_file", - "suffix", - "validation_file", - "wandb_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - training_file: typing.Union[ - MetaOapg.properties.training_file, - str, - ], - hyperparameters: typing.Union[ - MetaOapg.properties.hyperparameters, - dict, - frozendict.frozendict, - ], - model: typing.Union[ - MetaOapg.properties.model, - str, - ], - suffix: typing.Union[MetaOapg.properties.suffix, str, schemas.Unset] = schemas.unset, - validation_file: typing.Union[MetaOapg.properties.validation_file, str, schemas.Unset] = schemas.unset, - wandb_config: typing.Union[ - MetaOapg.properties.wandb_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneRequest": - return super().__new__( - cls, - *_args, - training_file=training_file, - hyperparameters=hyperparameters, - model=model, - suffix=suffix, - validation_file=validation_file, - wandb_config=wandb_config, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_fine_tune_response.py b/launch/api_client/model/create_fine_tune_response.py index 2d78bb58..449f305e 100644 --- a/launch/api_client/model/create_fine_tune_response.py +++ b/launch/api_client/model/create_fine_tune_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateFineTuneResponse(schemas.DictSchema): +class CreateFineTuneResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "id", } - + class properties: id = schemas.StrSchema __annotations__ = { "id": id, } - + id: MetaOapg.properties.id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + id: typing.Union[MetaOapg.properties.id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateFineTuneResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateFineTuneResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_light_llm_model_endpoint_request.py b/launch/api_client/model/create_light_llm_model_endpoint_request.py new file mode 100644 index 00000000..e8b8fa2c --- /dev/null +++ b/launch/api_client/model/create_light_llm_model_endpoint_request.py @@ -0,0 +1,842 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateLightLLMModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "metadata", + "model_name", + "max_workers", + "min_workers", + "name", + "per_worker", + "labels", + } + + class properties: + name = schemas.StrSchema + model_name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + min_workers = schemas.IntSchema + max_workers = schemas.IntSchema + per_worker = schemas.IntSchema + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + 
@staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + inference_framework_image_tag = schemas.StrSchema + num_shards = schemas.IntSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "lightllm": "LIGHTLLM", + } + + @schemas.classproperty + def LIGHTLLM(cls): + return cls("lightllm") + __annotations__ = { + "name": name, + "model_name": model_name, + "metadata": metadata, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": 
chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "source": source, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "endpoint_type": endpoint_type, + "inference_framework": inference_framework, + } + + metadata: MetaOapg.properties.metadata + model_name: MetaOapg.properties.model_name + max_workers: MetaOapg.properties.max_workers + min_workers: MetaOapg.properties.min_workers + name: MetaOapg.properties.name + per_worker: MetaOapg.properties.per_worker + labels: MetaOapg.properties.labels + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = 
schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: 
typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, + endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateLightLLMModelEndpointRequest': + return super().__new__( + cls, + *_args, + metadata=metadata, + model_name=model_name, + max_workers=max_workers, + min_workers=min_workers, + name=name, + per_worker=per_worker, + labels=labels, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + source=source, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + endpoint_type=endpoint_type, + inference_framework=inference_framework, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from 
launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_llm_model_endpoint_v1_request.py b/launch/api_client/model/create_llm_model_endpoint_v1_request.py index 599c36f5..28fdbae5 100644 --- a/launch/api_client/model/create_llm_model_endpoint_v1_request.py +++ b/launch/api_client/model/create_llm_model_endpoint_v1_request.py @@ -23,1146 +23,66 @@ from launch.api_client import schemas # noqa: F401 -class CreateLLMModelEndpointV1Request(schemas.DictSchema): +class CreateLLMModelEndpointV1Request( + schemas.ComposedSchema, +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - max_workers = schemas.IntSchema - metadata = schemas.DictSchema - min_workers = schemas.IntSchema - model_name = schemas.StrSchema - name = schemas.StrSchema - per_worker = schemas.IntSchema - billing_tags = schemas.DictSchema - checkpoint_path = schemas.StrSchema 
- - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - class MetaOapg: - format = "uri" - max_length = 2083 - min_length = 1 - - class endpoint_type( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ModelEndpointType, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "endpoint_type": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - gpus = schemas.IntSchema - high_priority = schemas.BoolSchema - - class inference_framework( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - LLMInferenceFramework, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "inference_framework": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - inference_framework_image_tag = schemas.StrSchema - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - num_shards = schemas.IntSchema - optimize_costs = schemas.BoolSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - - class source( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - LLMSource, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "source": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - __annotations__ = { - "labels": labels, - "max_workers": max_workers, - "metadata": metadata, - "min_workers": min_workers, - "model_name": model_name, - "name": name, - "per_worker": per_worker, - "billing_tags": billing_tags, - "checkpoint_path": checkpoint_path, - "cpus": cpus, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "endpoint_type": endpoint_type, - "gpu_type": gpu_type, - "gpus": gpus, - "high_priority": high_priority, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "memory": memory, - "num_shards": num_shards, - "optimize_costs": optimize_costs, - "post_inference_hooks": post_inference_hooks, - "prewarm": prewarm, - "public_inference": public_inference, - "quantize": quantize, - "source": source, - "storage": storage, - } - - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - 
per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> MetaOapg.properties.endpoint_type: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework"] - ) -> MetaOapg.properties.inference_framework: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> MetaOapg.properties.inference_framework_image_tag: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> MetaOapg.properties.source: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "max_workers", - "metadata", - "min_workers", - "model_name", - "name", - "per_worker", - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "endpoint_type", - "gpu_type", - "gpus", - "high_priority", - "inference_framework", - "inference_framework_image_tag", - "memory", - "num_shards", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: - ... 
- - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_type"] - ) -> typing.Union[MetaOapg.properties.endpoint_type, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework"] - ) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: - ... 
- - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union["Quantization", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["source"] - ) -> typing.Union[MetaOapg.properties.source, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... + class MetaOapg: + + @classmethod + @functools.lru_cache() + def one_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + CreateVLLMModelEndpointRequest, + CreateSGLangModelEndpointRequest, + CreateDeepSpeedModelEndpointRequest, + CreateTextGenerationInferenceModelEndpointRequest, + CreateLightLLMModelEndpointRequest, + CreateTensorRTLLMModelEndpointRequest, + ] - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "max_workers", - "metadata", - "min_workers", - "model_name", - "name", - "per_worker", - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "endpoint_type", - "gpu_type", - "gpus", - "high_priority", - "inference_framework", - "inference_framework_image_tag", - "memory", - "num_shards", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - model_name: typing.Union[ - MetaOapg.properties.model_name, - str, - ], - max_workers: typing.Union[ - MetaOapg.properties.max_workers, - decimal.Decimal, - int, - ], - min_workers: typing.Union[ - MetaOapg.properties.min_workers, - decimal.Decimal, - int, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - per_worker: typing.Union[ - MetaOapg.properties.per_worker, - decimal.Decimal, - int, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, 
schemas.Unset] = schemas.unset, - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - endpoint_type: typing.Union[ - MetaOapg.properties.endpoint_type, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - inference_framework: typing.Union[ - MetaOapg.properties.inference_framework, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - inference_framework_image_tag: typing.Union[ - MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset - ] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = 
schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - source: typing.Union[ - MetaOapg.properties.source, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateLLMModelEndpointV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateLLMModelEndpointV1Request': return super().__new__( cls, *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, - billing_tags=billing_tags, - checkpoint_path=checkpoint_path, - cpus=cpus, - default_callback_auth=default_callback_auth, - 
default_callback_url=default_callback_url, - endpoint_type=endpoint_type, - gpu_type=gpu_type, - gpus=gpus, - high_priority=high_priority, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - memory=memory, - num_shards=num_shards, - optimize_costs=optimize_costs, - post_inference_hooks=post_inference_hooks, - prewarm=prewarm, - public_inference=public_inference, - quantize=quantize, - source=source, - storage=storage, _configuration=_configuration, **kwargs, ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_inference_framework import ( - LLMInferenceFramework, +from launch.api_client.model.create_deep_speed_model_endpoint_request import ( + CreateDeepSpeedModelEndpointRequest, +) +from launch.api_client.model.create_light_llm_model_endpoint_request import ( + CreateLightLLMModelEndpointRequest, +) +from launch.api_client.model.create_sg_lang_model_endpoint_request import ( + CreateSGLangModelEndpointRequest, +) +from launch.api_client.model.create_tensor_rtllm_model_endpoint_request import ( + CreateTensorRTLLMModelEndpointRequest, +) +from launch.api_client.model.create_text_generation_inference_model_endpoint_request import ( + CreateTextGenerationInferenceModelEndpointRequest, +) +from launch.api_client.model.create_vllm_model_endpoint_request import ( + CreateVLLMModelEndpointRequest, ) -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.model_endpoint_type import ModelEndpointType -from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_llm_model_endpoint_v1_request.pyi b/launch/api_client/model/create_llm_model_endpoint_v1_request.pyi deleted file mode 100644 index a97d0c15..00000000 --- a/launch/api_client/model/create_llm_model_endpoint_v1_request.pyi +++ /dev/null @@ -1,1032 +0,0 @@ -# coding: utf-8 - -""" - 
launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateLLMModelEndpointV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "metadata", - "model_name", - "max_workers", - "min_workers", - "name", - "per_worker", - "labels", - } - - class properties: - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - max_workers = schemas.IntSchema - metadata = schemas.DictSchema - min_workers = schemas.IntSchema - model_name = schemas.StrSchema - name = schemas.StrSchema - per_worker = schemas.IntSchema - billing_tags = schemas.DictSchema - checkpoint_path = schemas.StrSchema - - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = 
schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - pass - - class endpoint_type( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - ModelEndpointType, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "endpoint_type": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - gpus = schemas.IntSchema - high_priority = schemas.BoolSchema - - class inference_framework( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - LLMInferenceFramework, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "inference_framework": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - inference_framework_image_tag = schemas.StrSchema - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - num_shards = schemas.IntSchema - optimize_costs = schemas.BoolSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - - class source( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - LLMSource, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "source": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "labels": labels, - "max_workers": max_workers, - "metadata": metadata, - "min_workers": min_workers, - "model_name": model_name, - "name": name, - "per_worker": per_worker, - "billing_tags": billing_tags, - "checkpoint_path": checkpoint_path, - "cpus": cpus, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "endpoint_type": endpoint_type, - "gpu_type": gpu_type, - "gpus": gpus, - "high_priority": high_priority, - "inference_framework": inference_framework, - "inference_framework_image_tag": inference_framework_image_tag, - "memory": memory, - "num_shards": num_shards, - "optimize_costs": optimize_costs, - "post_inference_hooks": post_inference_hooks, - "prewarm": prewarm, - "public_inference": public_inference, - "quantize": quantize, - "source": source, - "storage": storage, - } - metadata: MetaOapg.properties.metadata - model_name: MetaOapg.properties.model_name - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - name: MetaOapg.properties.name - per_worker: 
MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> MetaOapg.properties.checkpoint_path: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> MetaOapg.properties.endpoint_type: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework"] - ) -> MetaOapg.properties.inference_framework: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> MetaOapg.properties.inference_framework_image_tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["public_inference"] - ) -> MetaOapg.properties.public_inference: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> MetaOapg.properties.source: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "max_workers", - "metadata", - "min_workers", - "model_name", - "name", - "per_worker", - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "endpoint_type", - "gpu_type", - "gpus", - "high_priority", - "inference_framework", - "inference_framework_image_tag", - "memory", - "num_shards", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_type"] - ) -> typing.Union[MetaOapg.properties.endpoint_type, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework"] - ) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["quantize"] - ) -> typing.Union["Quantization", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["source"] - ) -> typing.Union[MetaOapg.properties.source, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "labels", - "max_workers", - "metadata", - "min_workers", - "model_name", - "name", - "per_worker", - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "endpoint_type", - "gpu_type", - "gpus", - "high_priority", - "inference_framework", - "inference_framework_image_tag", - "memory", - "num_shards", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - model_name: typing.Union[ - MetaOapg.properties.model_name, - str, - ], - max_workers: typing.Union[ - MetaOapg.properties.max_workers, - decimal.Decimal, - int, - ], - min_workers: typing.Union[ - MetaOapg.properties.min_workers, - decimal.Decimal, - int, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - per_worker: typing.Union[ - MetaOapg.properties.per_worker, - decimal.Decimal, - int, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset 
- ] = schemas.unset, - endpoint_type: typing.Union[ - MetaOapg.properties.endpoint_type, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - inference_framework: typing.Union[ - MetaOapg.properties.inference_framework, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - inference_framework_image_tag: typing.Union[ - MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset - ] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - source: typing.Union[ - MetaOapg.properties.source, - dict, - 
frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateLLMModelEndpointV1Request": - return super().__new__( - cls, - *_args, - metadata=metadata, - model_name=model_name, - max_workers=max_workers, - min_workers=min_workers, - name=name, - per_worker=per_worker, - labels=labels, - billing_tags=billing_tags, - checkpoint_path=checkpoint_path, - cpus=cpus, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - endpoint_type=endpoint_type, - gpu_type=gpu_type, - gpus=gpus, - high_priority=high_priority, - inference_framework=inference_framework, - inference_framework_image_tag=inference_framework_image_tag, - memory=memory, - num_shards=num_shards, - optimize_costs=optimize_costs, - post_inference_hooks=post_inference_hooks, - prewarm=prewarm, - public_inference=public_inference, - quantize=quantize, - source=source, - storage=storage, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.callback_auth import CallbackAuth -from launch_client.model.gpu_type import GpuType -from launch_client.model.llm_inference_framework import LLMInferenceFramework -from launch_client.model.llm_source import LLMSource -from launch_client.model.model_endpoint_type import ModelEndpointType -from 
launch_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_llm_model_endpoint_v1_response.py b/launch/api_client/model/create_llm_model_endpoint_v1_response.py index d5c200a5..f7742cc3 100644 --- a/launch/api_client/model/create_llm_model_endpoint_v1_response.py +++ b/launch/api_client/model/create_llm_model_endpoint_v1_response.py @@ -23,93 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateLLMModelEndpointV1Response(schemas.DictSchema): +class CreateLLMModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "endpoint_creation_task_id", } - + class properties: endpoint_creation_task_id = schemas.StrSchema __annotations__ = { "endpoint_creation_task_id": endpoint_creation_task_id, } - + endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateLLMModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateLLMModelEndpointV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_llm_model_endpoint_v1_response.pyi b/launch/api_client/model/create_llm_model_endpoint_v1_response.pyi deleted file mode 100644 index 276ae72a..00000000 --- a/launch/api_client/model/create_llm_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The 
version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateLLMModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateLLMModelEndpointV1Response": - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_model_bundle_v1_request.py b/launch/api_client/model/create_model_bundle_v1_request.py index c5170611..83ee0b75 100644 --- a/launch/api_client/model/create_model_bundle_v1_request.py +++ b/launch/api_client/model/create_model_bundle_v1_request.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CreateModelBundleV1Request(schemas.DictSchema): +class CreateModelBundleV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class CreateModelBundleV1Request(schemas.DictSchema): Request object for creating a Model Bundle. 
""" + class MetaOapg: required = { "requirements", @@ -40,231 +43,221 @@ class MetaOapg: "location", "env_params", } - + class properties: - @staticmethod - def env_params() -> typing.Type["ModelBundleEnvironmentParams"]: - return ModelBundleEnvironmentParams - - location = schemas.StrSchema name = schemas.StrSchema - - @staticmethod - def packaging_type() -> typing.Type["ModelBundlePackagingType"]: - return ModelBundlePackagingType - - class requirements(schemas.ListSchema): + location = schemas.StrSchema + + + class requirements( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": + ) -> 'requirements': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - app_config = schemas.DictSchema - metadata = schemas.DictSchema - schema_location = schemas.StrSchema + + @staticmethod + def env_params() -> typing.Type['ModelBundleEnvironmentParams']: + return ModelBundleEnvironmentParams + + @staticmethod + def packaging_type() -> typing.Type['ModelBundlePackagingType']: + return ModelBundlePackagingType + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def 
__new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class app_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'app_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class schema_location( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'schema_location': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "env_params": env_params, - "location": location, "name": name, - "packaging_type": packaging_type, + "location": location, "requirements": requirements, - "app_config": app_config, + "env_params": env_params, + 
"packaging_type": packaging_type, "metadata": metadata, + "app_config": app_config, "schema_location": schema_location, } - + requirements: MetaOapg.properties.requirements - packaging_type: "ModelBundlePackagingType" + packaging_type: 'ModelBundlePackagingType' name: MetaOapg.properties.name location: MetaOapg.properties.location - env_params: "ModelBundleEnvironmentParams" - + env_params: 'ModelBundleEnvironmentParams' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": - ... - + def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: - ... - + def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "env_params", - "location", - "name", - "packaging_type", - "requirements", - "app_config", - "metadata", - "schema_location", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "location", "requirements", "env_params", "packaging_type", "metadata", "app_config", "schema_location", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "env_params", - "location", - "name", - "packaging_type", - "requirements", - "app_config", - "metadata", - "schema_location", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "location", "requirements", "env_params", "packaging_type", "metadata", "app_config", "schema_location", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - packaging_type: "ModelBundlePackagingType", - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - env_params: "ModelBundleEnvironmentParams", - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - schema_location: typing.Union[MetaOapg.properties.schema_location, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], + packaging_type: 'ModelBundlePackagingType', + name: typing.Union[MetaOapg.properties.name, str, ], + location: typing.Union[MetaOapg.properties.location, str, ], + env_params: 'ModelBundleEnvironmentParams', + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + schema_location: typing.Union[MetaOapg.properties.schema_location, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV1Request": + **kwargs: 
typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateModelBundleV1Request': return super().__new__( cls, *_args, @@ -273,14 +266,13 @@ def __new__( name=name, location=location, env_params=env_params, - app_config=app_config, metadata=metadata, + app_config=app_config, schema_location=schema_location, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.model_bundle_environment_params import ( ModelBundleEnvironmentParams, ) diff --git a/launch/api_client/model/create_model_bundle_v1_request.pyi b/launch/api_client/model/create_model_bundle_v1_request.pyi deleted file mode 100644 index 24f63ed8..00000000 --- a/launch/api_client/model/create_model_bundle_v1_request.pyi +++ /dev/null @@ -1,245 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateModelBundleV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for creating a Model Bundle. 
- """ - - class MetaOapg: - required = { - "requirements", - "packaging_type", - "name", - "location", - "env_params", - } - - class properties: - @staticmethod - def env_params() -> typing.Type["ModelBundleEnvironmentParams"]: - return ModelBundleEnvironmentParams - location = schemas.StrSchema - name = schemas.StrSchema - - @staticmethod - def packaging_type() -> typing.Type["ModelBundlePackagingType"]: - return ModelBundlePackagingType - - class requirements(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - app_config = schemas.DictSchema - metadata = schemas.DictSchema - schema_location = schemas.StrSchema - __annotations__ = { - "env_params": env_params, - "location": location, - "name": name, - "packaging_type": packaging_type, - "requirements": requirements, - "app_config": app_config, - "metadata": metadata, - "schema_location": schema_location, - } - requirements: MetaOapg.properties.requirements - packaging_type: "ModelBundlePackagingType" - name: MetaOapg.properties.name - location: MetaOapg.properties.location - env_params: "ModelBundleEnvironmentParams" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["schema_location"] - ) -> MetaOapg.properties.schema_location: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "env_params", - "location", - "name", - "packaging_type", - "requirements", - "app_config", - "metadata", - "schema_location", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "env_params", - "location", - "name", - "packaging_type", - "requirements", - "app_config", - "metadata", - "schema_location", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - packaging_type: "ModelBundlePackagingType", - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - env_params: "ModelBundleEnvironmentParams", - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - schema_location: typing.Union[MetaOapg.properties.schema_location, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV1Request": - return super().__new__( - cls, - *_args, - requirements=requirements, - packaging_type=packaging_type, - name=name, - location=location, - env_params=env_params, - app_config=app_config, - metadata=metadata, - 
schema_location=schema_location, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) diff --git a/launch/api_client/model/create_model_bundle_v1_response.py b/launch/api_client/model/create_model_bundle_v1_response.py index 394aaf62..1a461552 100644 --- a/launch/api_client/model/create_model_bundle_v1_response.py +++ b/launch/api_client/model/create_model_bundle_v1_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CreateModelBundleV1Response(schemas.DictSchema): +class CreateModelBundleV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,82 +34,48 @@ class CreateModelBundleV1Response(schemas.DictSchema): Response object for creating a Model Bundle. """ + class MetaOapg: required = { "model_bundle_id", } - + class properties: model_bundle_id = schemas.StrSchema __annotations__ = { "model_bundle_id": model_bundle_id, } - + model_bundle_id: MetaOapg.properties.model_bundle_id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateModelBundleV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_model_bundle_v1_response.pyi b/launch/api_client/model/create_model_bundle_v1_response.pyi deleted file mode 100644 index 16fc7788..00000000 --- 
a/launch/api_client/model/create_model_bundle_v1_response.pyi +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateModelBundleV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for creating a Model Bundle. - """ - - class MetaOapg: - required = { - "model_bundle_id", - } - - class properties: - model_bundle_id = schemas.StrSchema - __annotations__ = { - "model_bundle_id": model_bundle_id, - } - model_bundle_id: MetaOapg.properties.model_bundle_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV1Response": - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_model_bundle_v2_request.py b/launch/api_client/model/create_model_bundle_v2_request.py index f2b3eaa7..1f21fd72 100644 --- a/launch/api_client/model/create_model_bundle_v2_request.py +++ b/launch/api_client/model/create_model_bundle_v2_request.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CreateModelBundleV2Request(schemas.DictSchema): +class CreateModelBundleV2Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,18 +34,26 @@ class CreateModelBundleV2Request(schemas.DictSchema): Request object for creating a Model Bundle. 
""" + class MetaOapg: required = { "flavor", "name", "schema_location", } - + class properties: + name = schemas.StrSchema + schema_location = schemas.StrSchema + + class flavor( schemas.ComposedSchema, ): + + class MetaOapg: + @classmethod @functools.lru_cache() def one_of(cls): @@ -61,191 +71,113 @@ def one_of(cls): StreamingEnhancedRunnableImageFlavor, TritonEnhancedRunnableImageFlavor, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "flavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'flavor': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: 
typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - name = schemas.StrSchema - schema_location = schemas.StrSchema - metadata = schemas.DictSchema __annotations__ = { - "flavor": flavor, "name": name, "schema_location": schema_location, + "flavor": flavor, "metadata": metadata, } - + flavor: MetaOapg.properties.flavor name: MetaOapg.properties.name schema_location: MetaOapg.properties.schema_location - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "name", - "schema_location", - "metadata", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "schema_location", "flavor", "metadata", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "name", - "schema_location", - "metadata", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "schema_location", "flavor", "metadata", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - schema_location: typing.Union[ - MetaOapg.properties.schema_location, - str, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + name: typing.Union[MetaOapg.properties.name, str, ], + schema_location: typing.Union[MetaOapg.properties.schema_location, str, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV2Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateModelBundleV2Request': return super().__new__( cls, *_args, @@ -257,7 +189,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.cloudpickle_artifact_flavor import ( CloudpickleArtifactFlavor, ) diff --git 
a/launch/api_client/model/create_model_bundle_v2_request.pyi b/launch/api_client/model/create_model_bundle_v2_request.pyi deleted file mode 100644 index e2b52ee7..00000000 --- a/launch/api_client/model/create_model_bundle_v2_request.pyi +++ /dev/null @@ -1,247 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateModelBundleV2Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Request object for creating a Model Bundle. - """ - - class MetaOapg: - required = { - "flavor", - "name", - "schema_location", - } - - class properties: - class flavor( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CloudpickleArtifactFlavor, - ZipArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "flavor": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - name = schemas.StrSchema - schema_location = schemas.StrSchema - metadata = schemas.DictSchema - __annotations__ = { - "flavor": flavor, - "name": name, - "schema_location": schema_location, - "metadata": metadata, - } - flavor: MetaOapg.properties.flavor - name: MetaOapg.properties.name - schema_location: MetaOapg.properties.schema_location - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["schema_location"] - ) -> MetaOapg.properties.schema_location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "name", - "schema_location", - "metadata", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> MetaOapg.properties.schema_location: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "name", - "schema_location", - "metadata", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - schema_location: typing.Union[ - MetaOapg.properties.schema_location, - str, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV2Request": - return 
super().__new__( - cls, - *_args, - flavor=flavor, - name=name, - schema_location=schema_location, - metadata=metadata, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch_client.model.runnable_image_flavor import RunnableImageFlavor -from launch_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch_client.model.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) -from launch_client.model.zip_artifact_flavor import ZipArtifactFlavor diff --git a/launch/api_client/model/create_model_bundle_v2_response.py b/launch/api_client/model/create_model_bundle_v2_response.py index c108a9e0..b79c1e80 100644 --- a/launch/api_client/model/create_model_bundle_v2_response.py +++ b/launch/api_client/model/create_model_bundle_v2_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CreateModelBundleV2Response(schemas.DictSchema): +class CreateModelBundleV2Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,82 +34,48 @@ class CreateModelBundleV2Response(schemas.DictSchema): Response object for creating a Model Bundle. """ + class MetaOapg: required = { "model_bundle_id", } - + class properties: model_bundle_id = schemas.StrSchema __annotations__ = { "model_bundle_id": model_bundle_id, } - + model_bundle_id: MetaOapg.properties.model_bundle_id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV2Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateModelBundleV2Response': return super().__new__( cls, *_args, diff --git 
a/launch/api_client/model/create_model_bundle_v2_response.pyi b/launch/api_client/model/create_model_bundle_v2_response.pyi deleted file mode 100644 index 7126d51c..00000000 --- a/launch/api_client/model/create_model_bundle_v2_response.pyi +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateModelBundleV2Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for creating a Model Bundle. - """ - - class MetaOapg: - required = { - "model_bundle_id", - } - - class properties: - model_bundle_id = schemas.StrSchema - __annotations__ = { - "model_bundle_id": model_bundle_id, - } - model_bundle_id: MetaOapg.properties.model_bundle_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundle_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelBundleV2Response": - return super().__new__( - cls, - *_args, - model_bundle_id=model_bundle_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_model_endpoint_v1_request.py b/launch/api_client/model/create_model_endpoint_v1_request.py index 55d68842..06c5a6ab 100644 --- a/launch/api_client/model/create_model_endpoint_v1_request.py +++ b/launch/api_client/model/create_model_endpoint_v1_request.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class CreateModelEndpointV1Request(schemas.DictSchema): +class CreateModelEndpointV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "endpoint_type", @@ -42,18 +45,66 @@ class MetaOapg: "gpus", "name", "per_worker", + "storage", "labels", } - + class properties: + + + class name( + schemas.StrSchema + ): + + + class MetaOapg: + max_length = 63 + model_bundle_id = schemas.StrSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + class cpus( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -69,108 +120,82 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: 
typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - @staticmethod - def endpoint_type() -> typing.Type["ModelEndpointType"]: - return ModelEndpointType - - class gpus(schemas.IntSchema): + + + class gpus( + schemas.IntSchema + ): + + class MetaOapg: inclusive_minimum = 0 - - class labels(schemas.DictSchema): + + + class memory( + schemas.ComposedSchema, + ): + + class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - class max_workers(schemas.IntSchema): - class MetaOapg: - inclusive_minimum = 0 - - class memory( + + + class storage( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -186,212 +211,281 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - 
None, - list, - tuple, - bytes, - ], - ) -> "memory": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - metadata = schemas.DictSchema - - class min_workers(schemas.IntSchema): + + + class min_workers( + schemas.IntSchema + ): + + class MetaOapg: inclusive_minimum = 0 - - model_bundle_id = schemas.StrSchema - - class name(schemas.StrSchema): + + + class max_workers( + schemas.IntSchema + ): + + class MetaOapg: - max_length = 63 - + inclusive_minimum = 0 per_worker = schemas.IntSchema - billing_tags = schemas.DictSchema - - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): + + + class labels( + schemas.DictSchema + ): + + class MetaOapg: - format = "uri" - max_length = 2083 - min_length = 1 - - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - high_priority = schemas.BoolSchema - optimize_costs = schemas.BoolSchema - - class post_inference_hooks(schemas.ListSchema): + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + class 
MetaOapg: items = schemas.StrSchema - + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[list, tuple, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": + ) -> 'post_inference_hooks': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - class storage( - schemas.ComposedSchema, + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + nodes_per_worker = schemas.IntSchema + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class concurrent_requests_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'concurrent_requests_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def 
__new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "cpus": cpus, + "name": name, + "model_bundle_id": model_bundle_id, "endpoint_type": endpoint_type, + "metadata": metadata, + "cpus": cpus, "gpus": gpus, - "labels": labels, - "max_workers": max_workers, "memory": memory, - "metadata": metadata, + "storage": storage, "min_workers": min_workers, - "model_bundle_id": model_bundle_id, - "name": name, + "max_workers": max_workers, "per_worker": per_worker, - "billing_tags": billing_tags, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, + "labels": labels, + "post_inference_hooks": post_inference_hooks, "gpu_type": gpu_type, - "high_priority": high_priority, + "nodes_per_worker": nodes_per_worker, "optimize_costs": optimize_costs, - "post_inference_hooks": post_inference_hooks, + "concurrent_requests_per_worker": concurrent_requests_per_worker, "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": 
default_callback_url, + "default_callback_auth": default_callback_auth, "public_inference": public_inference, - "storage": storage, } - - endpoint_type: "ModelEndpointType" + + endpoint_type: 'ModelEndpointType' metadata: MetaOapg.properties.metadata memory: MetaOapg.properties.memory cpus: MetaOapg.properties.cpus @@ -401,405 +495,191 @@ def __new__( gpus: MetaOapg.properties.gpus name: MetaOapg.properties.name per_worker: MetaOapg.properties.per_worker + storage: MetaOapg.properties.storage labels: MetaOapg.properties.labels - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: - ... - + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": - ... - + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... - + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: - ... - + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: - ... - + def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: - ... - + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: - ... - + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: - ... - + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "endpoint_type", - "gpus", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "name", - "per_worker", - "billing_tags", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "high_priority", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_bundle_id", "endpoint_type", "metadata", "cpus", "gpus", "memory", "storage", "min_workers", "max_workers", "per_worker", "labels", "post_inference_hooks", "gpu_type", "nodes_per_worker", "optimize_costs", "concurrent_requests_per_worker", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> typing.Union[MetaOapg.properties.concurrent_requests_per_worker, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "endpoint_type", - "gpus", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "name", - "per_worker", - "billing_tags", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "high_priority", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_bundle_id", "endpoint_type", "metadata", "cpus", "gpus", "memory", "storage", "min_workers", "max_workers", "per_worker", "labels", "post_inference_hooks", "gpu_type", "nodes_per_worker", "optimize_costs", "concurrent_requests_per_worker", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_type: "ModelEndpointType", - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - 
bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - max_workers: typing.Union[ - MetaOapg.properties.max_workers, - decimal.Decimal, - int, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], - min_workers: typing.Union[ - MetaOapg.properties.min_workers, - decimal.Decimal, - int, - ], - gpus: typing.Union[ - MetaOapg.properties.gpus, - decimal.Decimal, - int, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - per_worker: typing.Union[ - MetaOapg.properties.per_worker, - decimal.Decimal, - int, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + endpoint_type: 'ModelEndpointType', + 
metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, 
None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelEndpointV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateModelEndpointV1Request': return super().__new__( cls, *_args, @@ -813,22 +693,23 @@ def __new__( gpus=gpus, name=name, per_worker=per_worker, + storage=storage, labels=labels, - billing_tags=billing_tags, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, + post_inference_hooks=post_inference_hooks, gpu_type=gpu_type, - high_priority=high_priority, + nodes_per_worker=nodes_per_worker, optimize_costs=optimize_costs, - post_inference_hooks=post_inference_hooks, + concurrent_requests_per_worker=concurrent_requests_per_worker, prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, public_inference=public_inference, - storage=storage, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.callback_auth import CallbackAuth from launch.api_client.model.gpu_type import GpuType from launch.api_client.model.model_endpoint_type import 
ModelEndpointType diff --git a/launch/api_client/model/create_model_endpoint_v1_request.pyi b/launch/api_client/model/create_model_endpoint_v1_request.pyi deleted file mode 100644 index f9363a0b..00000000 --- a/launch/api_client/model/create_model_endpoint_v1_request.pyi +++ /dev/null @@ -1,723 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateModelEndpointV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "endpoint_type", - "metadata", - "memory", - "cpus", - "max_workers", - "model_bundle_id", - "min_workers", - "gpus", - "name", - "per_worker", - "labels", - } - - class properties: - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def endpoint_type() -> typing.Type["ModelEndpointType"]: - return ModelEndpointType - - class gpus(schemas.IntSchema): - pass - - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - class max_workers(schemas.IntSchema): - pass - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod 
- @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - metadata = schemas.DictSchema - - class min_workers(schemas.IntSchema): - pass - model_bundle_id = schemas.StrSchema - - class name(schemas.StrSchema): - pass - per_worker = schemas.IntSchema - billing_tags = schemas.DictSchema - - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - pass - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - high_priority = schemas.BoolSchema - optimize_costs = schemas.BoolSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
"post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "cpus": cpus, - "endpoint_type": endpoint_type, - "gpus": gpus, - "labels": labels, - "max_workers": max_workers, - "memory": memory, - "metadata": metadata, - "min_workers": min_workers, - "model_bundle_id": model_bundle_id, - "name": name, - "per_worker": per_worker, - "billing_tags": billing_tags, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "gpu_type": gpu_type, - 
"high_priority": high_priority, - "optimize_costs": optimize_costs, - "post_inference_hooks": post_inference_hooks, - "prewarm": prewarm, - "public_inference": public_inference, - "storage": storage, - } - endpoint_type: "ModelEndpointType" - metadata: MetaOapg.properties.metadata - memory: MetaOapg.properties.memory - cpus: MetaOapg.properties.cpus - max_workers: MetaOapg.properties.max_workers - model_bundle_id: MetaOapg.properties.model_bundle_id - min_workers: MetaOapg.properties.min_workers - gpus: MetaOapg.properties.gpus - name: MetaOapg.properties.name - per_worker: MetaOapg.properties.per_worker - labels: MetaOapg.properties.labels - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["public_inference"] - ) -> MetaOapg.properties.public_inference: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "endpoint_type", - "gpus", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "name", - "per_worker", - "billing_tags", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "high_priority", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "endpoint_type", - "gpus", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "name", - "per_worker", - "billing_tags", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "high_priority", - "optimize_costs", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_type: "ModelEndpointType", - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - max_workers: typing.Union[ - MetaOapg.properties.max_workers, - decimal.Decimal, - int, - ], - model_bundle_id: typing.Union[ - MetaOapg.properties.model_bundle_id, - str, - ], - min_workers: typing.Union[ - MetaOapg.properties.min_workers, - decimal.Decimal, - int, - ], - gpus: typing.Union[ - MetaOapg.properties.gpus, - decimal.Decimal, - int, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - per_worker: typing.Union[ - MetaOapg.properties.per_worker, - decimal.Decimal, - int, - ], - labels: typing.Union[ - MetaOapg.properties.labels, - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = 
schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelEndpointV1Request": - return super().__new__( - cls, - *_args, - endpoint_type=endpoint_type, - metadata=metadata, - memory=memory, - cpus=cpus, - max_workers=max_workers, - model_bundle_id=model_bundle_id, - min_workers=min_workers, - gpus=gpus, - name=name, - per_worker=per_worker, - labels=labels, - billing_tags=billing_tags, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - gpu_type=gpu_type, - high_priority=high_priority, - optimize_costs=optimize_costs, - post_inference_hooks=post_inference_hooks, - prewarm=prewarm, - public_inference=public_inference, - storage=storage, - _configuration=_configuration, - **kwargs, - ) - -from 
launch_client.model.callback_auth import CallbackAuth -from launch_client.model.gpu_type import GpuType -from launch_client.model.model_endpoint_type import ModelEndpointType diff --git a/launch/api_client/model/create_model_endpoint_v1_response.py b/launch/api_client/model/create_model_endpoint_v1_response.py index 1841c16f..e8c29dee 100644 --- a/launch/api_client/model/create_model_endpoint_v1_response.py +++ b/launch/api_client/model/create_model_endpoint_v1_response.py @@ -23,93 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class CreateModelEndpointV1Response(schemas.DictSchema): +class CreateModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "endpoint_creation_task_id", } - + class properties: endpoint_creation_task_id = schemas.StrSchema __annotations__ = { "endpoint_creation_task_id": endpoint_creation_task_id, } - + endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateModelEndpointV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_model_endpoint_v1_response.pyi 
b/launch/api_client/model/create_model_endpoint_v1_response.pyi deleted file mode 100644 index 57887d1e..00000000 --- a/launch/api_client/model/create_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateModelEndpointV1Response": - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_sg_lang_model_endpoint_request.py b/launch/api_client/model/create_sg_lang_model_endpoint_request.py new file mode 100644 index 00000000..ebb54f24 --- /dev/null +++ b/launch/api_client/model/create_sg_lang_model_endpoint_request.py @@ -0,0 +1,3402 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateSGLangModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "metadata", + "model_name", + "max_workers", + "min_workers", + "name", + "per_worker", + "labels", + } + + class properties: + name = schemas.StrSchema + model_name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + min_workers = schemas.IntSchema + max_workers = schemas.IntSchema + per_worker = schemas.IntSchema + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + 
@staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + inference_framework_image_tag = schemas.StrSchema + num_shards = schemas.IntSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class trust_remote_code( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'trust_remote_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tp_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tp_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_tokenizer_init( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 
'skip_tokenizer_init': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class kv_cache_dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'kv_cache_dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization_param_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization_param_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class context_length( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, 
decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'context_length': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class device( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'device': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class served_model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'served_model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class is_embedding( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'is_embedding': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class mem_fraction_static( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'mem_fraction_static': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_running_requests( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_running_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_total_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_total_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chunked_prefill_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chunked_prefill_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_prefill_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_prefill_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class schedule_policy( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'schedule_policy': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class schedule_conservativeness( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'schedule_conservativeness': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpu_offload_gb( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cpu_offload_gb': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prefill_only_one_req( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prefill_only_one_req': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stream_interval( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stream_interval': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class random_seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'random_seed': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class constrained_json_whitespace_pattern( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'constrained_json_whitespace_pattern': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class watchdog_timeout( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'watchdog_timeout': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class download_dir( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'download_dir': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class base_gpu_id( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'base_gpu_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class log_level( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'log_level': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class log_level_http( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'log_level_http': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class log_requests( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'log_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class show_time_cost( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'show_time_cost': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class decode_log_interval( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'decode_log_interval': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class api_key( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'api_key': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class file_storage_pth( + schemas.StrBase, + 
schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'file_storage_pth': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_cache_report( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_cache_report': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class data_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'data_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_balance_method( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_balance_method': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class expert_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'expert_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dist_init_addr( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'dist_init_addr': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class nnodes( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nnodes': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class node_rank( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'node_rank': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class json_model_override_args( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'json_model_override_args': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class lora_paths( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'lora_paths': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_loras_per_batch( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_loras_per_batch': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + 
) + + + class attention_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'attention_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class sampling_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'sampling_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class grammar_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'grammar_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_algorithm( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_algorithm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_draft_model_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_draft_model_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_num_steps( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + 
_configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_num_steps': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_num_draft_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_num_draft_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_eagle_topk( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_eagle_topk': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_double_sparsity( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_double_sparsity': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_channel_config_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_channel_config_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_heavy_channel_num( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_heavy_channel_num': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_heavy_token_num( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_heavy_token_num': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_heavy_channel_type( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_heavy_channel_type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_sparse_decode_threshold( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_sparse_decode_threshold': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_radix_cache( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_radix_cache': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_jump_forward( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_jump_forward': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_cuda_graph( + schemas.BoolBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_cuda_graph': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_cuda_graph_padding( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_cuda_graph_padding': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_outlines_disk_cache( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_outlines_disk_cache': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_custom_all_reduce( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_custom_all_reduce': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_mla( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_mla': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_overlap_schedule( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, 
+ ) -> 'disable_overlap_schedule': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_mixed_chunk( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_mixed_chunk': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_dp_attention( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_dp_attention': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_ep_moe( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_ep_moe': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_torch_compile( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_torch_compile': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class torch_compile_max_bs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'torch_compile_max_bs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cuda_graph_max_bs( + schemas.IntBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cuda_graph_max_bs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cuda_graph_bs( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cuda_graph_bs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class torchao_config( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'torchao_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_nan_detection( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_nan_detection': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_p2p_check( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_p2p_check': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class triton_attention_reduce_in_fp32( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'triton_attention_reduce_in_fp32': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class triton_attention_num_kv_splits( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'triton_attention_num_kv_splits': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_continuous_decode_steps( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_continuous_decode_steps': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class delete_ckpt_after_loading( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'delete_ckpt_after_loading': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_memory_saver( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_memory_saver': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class allow_auto_truncate( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'allow_auto_truncate': + return super().__new__( + 
cls, + *_args, + _configuration=_configuration, + ) + + + class enable_custom_logit_processor( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_custom_logit_processor': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tool_call_parser( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_call_parser': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class huggingface_repo( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'huggingface_repo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "sglang": "SGLANG", + } + + @schemas.classproperty + def SGLANG(cls): + return cls("sglang") + __annotations__ = { + "name": name, + "model_name": model_name, + "metadata": metadata, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": 
default_callback_auth, + "public_inference": public_inference, + "chat_template_override": chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "source": source, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "endpoint_type": endpoint_type, + "trust_remote_code": trust_remote_code, + "tp_size": tp_size, + "skip_tokenizer_init": skip_tokenizer_init, + "load_format": load_format, + "dtype": dtype, + "kv_cache_dtype": kv_cache_dtype, + "quantization_param_path": quantization_param_path, + "quantization": quantization, + "context_length": context_length, + "device": device, + "served_model_name": served_model_name, + "chat_template": chat_template, + "is_embedding": is_embedding, + "revision": revision, + "mem_fraction_static": mem_fraction_static, + "max_running_requests": max_running_requests, + "max_total_tokens": max_total_tokens, + "chunked_prefill_size": chunked_prefill_size, + "max_prefill_tokens": max_prefill_tokens, + "schedule_policy": schedule_policy, + "schedule_conservativeness": schedule_conservativeness, + "cpu_offload_gb": cpu_offload_gb, + "prefill_only_one_req": prefill_only_one_req, + "stream_interval": stream_interval, + "random_seed": random_seed, + "constrained_json_whitespace_pattern": constrained_json_whitespace_pattern, + "watchdog_timeout": watchdog_timeout, + "download_dir": download_dir, + "base_gpu_id": base_gpu_id, + "log_level": log_level, + "log_level_http": log_level_http, + "log_requests": log_requests, + "show_time_cost": show_time_cost, + "enable_metrics": enable_metrics, + "decode_log_interval": decode_log_interval, + "api_key": api_key, + "file_storage_pth": file_storage_pth, + "enable_cache_report": enable_cache_report, + "data_parallel_size": data_parallel_size, + "load_balance_method": load_balance_method, + "expert_parallel_size": expert_parallel_size, + "dist_init_addr": dist_init_addr, + "nnodes": nnodes, + "node_rank": node_rank, + 
"json_model_override_args": json_model_override_args, + "lora_paths": lora_paths, + "max_loras_per_batch": max_loras_per_batch, + "attention_backend": attention_backend, + "sampling_backend": sampling_backend, + "grammar_backend": grammar_backend, + "speculative_algorithm": speculative_algorithm, + "speculative_draft_model_path": speculative_draft_model_path, + "speculative_num_steps": speculative_num_steps, + "speculative_num_draft_tokens": speculative_num_draft_tokens, + "speculative_eagle_topk": speculative_eagle_topk, + "enable_double_sparsity": enable_double_sparsity, + "ds_channel_config_path": ds_channel_config_path, + "ds_heavy_channel_num": ds_heavy_channel_num, + "ds_heavy_token_num": ds_heavy_token_num, + "ds_heavy_channel_type": ds_heavy_channel_type, + "ds_sparse_decode_threshold": ds_sparse_decode_threshold, + "disable_radix_cache": disable_radix_cache, + "disable_jump_forward": disable_jump_forward, + "disable_cuda_graph": disable_cuda_graph, + "disable_cuda_graph_padding": disable_cuda_graph_padding, + "disable_outlines_disk_cache": disable_outlines_disk_cache, + "disable_custom_all_reduce": disable_custom_all_reduce, + "disable_mla": disable_mla, + "disable_overlap_schedule": disable_overlap_schedule, + "enable_mixed_chunk": enable_mixed_chunk, + "enable_dp_attention": enable_dp_attention, + "enable_ep_moe": enable_ep_moe, + "enable_torch_compile": enable_torch_compile, + "torch_compile_max_bs": torch_compile_max_bs, + "cuda_graph_max_bs": cuda_graph_max_bs, + "cuda_graph_bs": cuda_graph_bs, + "torchao_config": torchao_config, + "enable_nan_detection": enable_nan_detection, + "enable_p2p_check": enable_p2p_check, + "triton_attention_reduce_in_fp32": triton_attention_reduce_in_fp32, + "triton_attention_num_kv_splits": triton_attention_num_kv_splits, + "num_continuous_decode_steps": num_continuous_decode_steps, + "delete_ckpt_after_loading": delete_ckpt_after_loading, + "enable_memory_saver": enable_memory_saver, + "allow_auto_truncate": 
allow_auto_truncate, + "enable_custom_logit_processor": enable_custom_logit_processor, + "tool_call_parser": tool_call_parser, + "huggingface_repo": huggingface_repo, + "inference_framework": inference_framework, + } + + metadata: MetaOapg.properties.metadata + model_name: MetaOapg.properties.model_name + max_workers: MetaOapg.properties.max_workers + min_workers: MetaOapg.properties.min_workers + name: MetaOapg.properties.name + per_worker: MetaOapg.properties.per_worker + labels: MetaOapg.properties.labels + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
    # NOTE(review): auto-generated by openapi-generator (this module is listed in
    # .openapi-generator/FILES) -- do not hand-edit; regenerate from the spec instead.
    # Typed ``__getitem__`` overload stubs: one per known property key, so type
    # checkers resolve ``instance["key"]`` to that property's schema type.
    # Properties backed by their own schema classes (e.g. "gpu_type", "source")
    # return those classes as forward references.
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["tp_size"]) -> MetaOapg.properties.tp_size: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> MetaOapg.properties.kv_cache_dtype: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["context_length"]) -> MetaOapg.properties.context_length: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["device"]) -> MetaOapg.properties.device: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["is_embedding"]) -> MetaOapg.properties.is_embedding: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["mem_fraction_static"]) -> MetaOapg.properties.mem_fraction_static: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["max_running_requests"]) -> MetaOapg.properties.max_running_requests: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["max_total_tokens"]) -> MetaOapg.properties.max_total_tokens: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> MetaOapg.properties.chunked_prefill_size: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> MetaOapg.properties.max_prefill_tokens: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["schedule_policy"]) -> MetaOapg.properties.schedule_policy: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> MetaOapg.properties.schedule_conservativeness: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> MetaOapg.properties.cpu_offload_gb: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> MetaOapg.properties.prefill_only_one_req: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["stream_interval"]) -> MetaOapg.properties.stream_interval: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["random_seed"]) -> MetaOapg.properties.random_seed: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> MetaOapg.properties.constrained_json_whitespace_pattern: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["watchdog_timeout"]) -> MetaOapg.properties.watchdog_timeout: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["download_dir"]) -> MetaOapg.properties.download_dir: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["base_gpu_id"]) -> MetaOapg.properties.base_gpu_id: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["log_level"]) -> MetaOapg.properties.log_level: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["log_level_http"]) -> MetaOapg.properties.log_level_http: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["log_requests"]) -> MetaOapg.properties.log_requests: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["show_time_cost"]) -> MetaOapg.properties.show_time_cost: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_metrics"]) -> MetaOapg.properties.enable_metrics: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["decode_log_interval"]) -> MetaOapg.properties.decode_log_interval: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["api_key"]) -> MetaOapg.properties.api_key: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["file_storage_pth"]) -> MetaOapg.properties.file_storage_pth: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_cache_report"]) -> MetaOapg.properties.enable_cache_report: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["data_parallel_size"]) -> MetaOapg.properties.data_parallel_size: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["load_balance_method"]) -> MetaOapg.properties.load_balance_method: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["expert_parallel_size"]) -> MetaOapg.properties.expert_parallel_size: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["dist_init_addr"]) -> MetaOapg.properties.dist_init_addr: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["nnodes"]) -> MetaOapg.properties.nnodes: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["node_rank"]) -> MetaOapg.properties.node_rank: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["json_model_override_args"]) -> MetaOapg.properties.json_model_override_args: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["lora_paths"]) -> MetaOapg.properties.lora_paths: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> MetaOapg.properties.max_loras_per_batch: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["sampling_backend"]) -> MetaOapg.properties.sampling_backend: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["grammar_backend"]) -> MetaOapg.properties.grammar_backend: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["speculative_algorithm"]) -> MetaOapg.properties.speculative_algorithm: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> MetaOapg.properties.speculative_draft_model_path: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["speculative_num_steps"]) -> MetaOapg.properties.speculative_num_steps: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> MetaOapg.properties.speculative_num_draft_tokens: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> MetaOapg.properties.speculative_eagle_topk: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> MetaOapg.properties.enable_double_sparsity: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> MetaOapg.properties.ds_channel_config_path: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> MetaOapg.properties.ds_heavy_channel_num: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> MetaOapg.properties.ds_heavy_token_num: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> MetaOapg.properties.ds_heavy_channel_type: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> MetaOapg.properties.ds_sparse_decode_threshold: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_radix_cache"]) -> MetaOapg.properties.disable_radix_cache: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_jump_forward"]) -> MetaOapg.properties.disable_jump_forward: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> MetaOapg.properties.disable_cuda_graph: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> MetaOapg.properties.disable_cuda_graph_padding: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> MetaOapg.properties.disable_outlines_disk_cache: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> MetaOapg.properties.disable_custom_all_reduce: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_mla"]) -> MetaOapg.properties.disable_mla: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> MetaOapg.properties.disable_overlap_schedule: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> MetaOapg.properties.enable_mixed_chunk: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_dp_attention"]) -> MetaOapg.properties.enable_dp_attention: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_ep_moe"]) -> MetaOapg.properties.enable_ep_moe: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_torch_compile"]) -> MetaOapg.properties.enable_torch_compile: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> MetaOapg.properties.torch_compile_max_bs: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> MetaOapg.properties.cuda_graph_max_bs: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> MetaOapg.properties.cuda_graph_bs: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["torchao_config"]) -> MetaOapg.properties.torchao_config: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_nan_detection"]) -> MetaOapg.properties.enable_nan_detection: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_p2p_check"]) -> MetaOapg.properties.enable_p2p_check: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> MetaOapg.properties.triton_attention_reduce_in_fp32: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> MetaOapg.properties.triton_attention_num_kv_splits: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> MetaOapg.properties.num_continuous_decode_steps: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> MetaOapg.properties.delete_ckpt_after_loading: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_memory_saver"]) -> MetaOapg.properties.enable_memory_saver: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> MetaOapg.properties.allow_auto_truncate: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> MetaOapg.properties.enable_custom_logit_processor: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["huggingface_repo"]) -> MetaOapg.properties.huggingface_repo: ...
    # NOTE(review): auto-generated by openapi-generator -- do not hand-edit.
    # Final ``__getitem__`` overload for a known key, then the catch-all ``str``
    # overload (unknown keys resolve to the unset/any-type schema), then the
    # concrete dispatcher whose Literal union enumerates every known key and
    # which simply delegates to the dict base class.
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ...

    @typing.overload
    def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...

    def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", "speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", "ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", "disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", "inference_framework", ], str]):
        # dict_instance[name] accessor
        return super().__getitem__(name)


    # ``get_item_oapg`` overloads begin here. The first seven keys ("name"
    # through "labels") return their schema type directly -- these appear to be
    # the required properties (they match the bare class-level attribute
    # declarations above; TODO confirm against the schema's required set).
    # From "quantize" on, return types are unioned with ``schemas.Unset``
    # because the key may be absent.
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ...
    # NOTE(review): auto-generated by openapi-generator -- do not hand-edit.
    # ``get_item_oapg`` overload stubs for optional properties: each return type
    # is unioned with ``schemas.Unset`` since the key may be missing from the
    # payload. Properties backed by their own schema classes (e.g. "gpu_type",
    # "source") return those classes as forward references.
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["tp_size"]) -> typing.Union[MetaOapg.properties.tp_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> typing.Union[MetaOapg.properties.kv_cache_dtype, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["context_length"]) -> typing.Union[MetaOapg.properties.context_length, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["device"]) -> typing.Union[MetaOapg.properties.device, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["is_embedding"]) -> typing.Union[MetaOapg.properties.is_embedding, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["mem_fraction_static"]) -> typing.Union[MetaOapg.properties.mem_fraction_static, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_running_requests"]) -> typing.Union[MetaOapg.properties.max_running_requests, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_total_tokens"]) -> typing.Union[MetaOapg.properties.max_total_tokens, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> typing.Union[MetaOapg.properties.chunked_prefill_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> typing.Union[MetaOapg.properties.max_prefill_tokens, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["schedule_policy"]) -> typing.Union[MetaOapg.properties.schedule_policy, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> typing.Union[MetaOapg.properties.schedule_conservativeness, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> typing.Union[MetaOapg.properties.cpu_offload_gb, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> typing.Union[MetaOapg.properties.prefill_only_one_req, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["stream_interval"]) -> typing.Union[MetaOapg.properties.stream_interval, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["random_seed"]) -> typing.Union[MetaOapg.properties.random_seed, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["watchdog_timeout"]) -> typing.Union[MetaOapg.properties.watchdog_timeout, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["download_dir"]) -> typing.Union[MetaOapg.properties.download_dir, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["base_gpu_id"]) -> typing.Union[MetaOapg.properties.base_gpu_id, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["log_level"]) -> typing.Union[MetaOapg.properties.log_level, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["log_level_http"]) -> typing.Union[MetaOapg.properties.log_level_http, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["log_requests"]) -> typing.Union[MetaOapg.properties.log_requests, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["show_time_cost"]) -> typing.Union[MetaOapg.properties.show_time_cost, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_metrics"]) -> typing.Union[MetaOapg.properties.enable_metrics, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["decode_log_interval"]) -> typing.Union[MetaOapg.properties.decode_log_interval, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["api_key"]) -> typing.Union[MetaOapg.properties.api_key, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["file_storage_pth"]) -> typing.Union[MetaOapg.properties.file_storage_pth, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_cache_report"]) -> typing.Union[MetaOapg.properties.enable_cache_report, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["data_parallel_size"]) -> typing.Union[MetaOapg.properties.data_parallel_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["load_balance_method"]) -> typing.Union[MetaOapg.properties.load_balance_method, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["expert_parallel_size"]) -> typing.Union[MetaOapg.properties.expert_parallel_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["dist_init_addr"]) -> typing.Union[MetaOapg.properties.dist_init_addr, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["nnodes"]) -> typing.Union[MetaOapg.properties.nnodes, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["node_rank"]) -> typing.Union[MetaOapg.properties.node_rank, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["json_model_override_args"]) -> typing.Union[MetaOapg.properties.json_model_override_args, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["lora_paths"]) -> typing.Union[MetaOapg.properties.lora_paths, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> typing.Union[MetaOapg.properties.max_loras_per_batch, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["sampling_backend"]) -> typing.Union[MetaOapg.properties.sampling_backend, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["grammar_backend"]) -> typing.Union[MetaOapg.properties.grammar_backend, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_algorithm"]) -> typing.Union[MetaOapg.properties.speculative_algorithm, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> typing.Union[MetaOapg.properties.speculative_draft_model_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_steps"]) -> typing.Union[MetaOapg.properties.speculative_num_steps, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> typing.Union[MetaOapg.properties.speculative_num_draft_tokens, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> typing.Union[MetaOapg.properties.speculative_eagle_topk, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> typing.Union[MetaOapg.properties.enable_double_sparsity, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> typing.Union[MetaOapg.properties.ds_channel_config_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_num, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_token_num, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_type, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_radix_cache"]) -> typing.Union[MetaOapg.properties.disable_radix_cache, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_jump_forward"]) -> typing.Union[MetaOapg.properties.disable_jump_forward, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph_padding, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> typing.Union[MetaOapg.properties.disable_outlines_disk_cache, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> typing.Union[MetaOapg.properties.disable_custom_all_reduce, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_mla"]) -> typing.Union[MetaOapg.properties.disable_mla, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> typing.Union[MetaOapg.properties.disable_overlap_schedule, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> typing.Union[MetaOapg.properties.enable_mixed_chunk, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_dp_attention"]) -> typing.Union[MetaOapg.properties.enable_dp_attention, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_ep_moe"]) -> typing.Union[MetaOapg.properties.enable_ep_moe, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_torch_compile"]) -> typing.Union[MetaOapg.properties.enable_torch_compile, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> typing.Union[MetaOapg.properties.torch_compile_max_bs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_max_bs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_bs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["torchao_config"]) -> typing.Union[MetaOapg.properties.torchao_config, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_nan_detection"]) -> typing.Union[MetaOapg.properties.enable_nan_detection, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_p2p_check"]) -> typing.Union[MetaOapg.properties.enable_p2p_check, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> typing.Union[MetaOapg.properties.num_continuous_decode_steps, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> typing.Union[MetaOapg.properties.delete_ckpt_after_loading, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_memory_saver"]) -> typing.Union[MetaOapg.properties.enable_memory_saver, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> typing.Union[MetaOapg.properties.allow_auto_truncate, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> typing.Union[MetaOapg.properties.enable_custom_logit_processor, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ...
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["huggingface_repo"]) -> typing.Union[MetaOapg.properties.huggingface_repo, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", 
"speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", "ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", "disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", "inference_framework", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + source: 
typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, + endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, + trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, + tp_size: typing.Union[MetaOapg.properties.tp_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, + load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, + dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, + kv_cache_dtype: typing.Union[MetaOapg.properties.kv_cache_dtype, None, str, schemas.Unset] = schemas.unset, + quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, + quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, + context_length: typing.Union[MetaOapg.properties.context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + device: typing.Union[MetaOapg.properties.device, None, str, schemas.Unset] = schemas.unset, + served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + is_embedding: typing.Union[MetaOapg.properties.is_embedding, None, bool, schemas.Unset] = schemas.unset, + revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, + mem_fraction_static: 
typing.Union[MetaOapg.properties.mem_fraction_static, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + max_running_requests: typing.Union[MetaOapg.properties.max_running_requests, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_total_tokens: typing.Union[MetaOapg.properties.max_total_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + chunked_prefill_size: typing.Union[MetaOapg.properties.chunked_prefill_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_prefill_tokens: typing.Union[MetaOapg.properties.max_prefill_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + schedule_policy: typing.Union[MetaOapg.properties.schedule_policy, None, str, schemas.Unset] = schemas.unset, + schedule_conservativeness: typing.Union[MetaOapg.properties.schedule_conservativeness, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + cpu_offload_gb: typing.Union[MetaOapg.properties.cpu_offload_gb, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + prefill_only_one_req: typing.Union[MetaOapg.properties.prefill_only_one_req, None, bool, schemas.Unset] = schemas.unset, + stream_interval: typing.Union[MetaOapg.properties.stream_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + random_seed: typing.Union[MetaOapg.properties.random_seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + constrained_json_whitespace_pattern: typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, + watchdog_timeout: typing.Union[MetaOapg.properties.watchdog_timeout, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + download_dir: typing.Union[MetaOapg.properties.download_dir, None, str, schemas.Unset] = schemas.unset, + base_gpu_id: typing.Union[MetaOapg.properties.base_gpu_id, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + log_level: 
typing.Union[MetaOapg.properties.log_level, None, str, schemas.Unset] = schemas.unset, + log_level_http: typing.Union[MetaOapg.properties.log_level_http, None, str, schemas.Unset] = schemas.unset, + log_requests: typing.Union[MetaOapg.properties.log_requests, None, bool, schemas.Unset] = schemas.unset, + show_time_cost: typing.Union[MetaOapg.properties.show_time_cost, None, bool, schemas.Unset] = schemas.unset, + enable_metrics: typing.Union[MetaOapg.properties.enable_metrics, None, bool, schemas.Unset] = schemas.unset, + decode_log_interval: typing.Union[MetaOapg.properties.decode_log_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + api_key: typing.Union[MetaOapg.properties.api_key, None, str, schemas.Unset] = schemas.unset, + file_storage_pth: typing.Union[MetaOapg.properties.file_storage_pth, None, str, schemas.Unset] = schemas.unset, + enable_cache_report: typing.Union[MetaOapg.properties.enable_cache_report, None, bool, schemas.Unset] = schemas.unset, + data_parallel_size: typing.Union[MetaOapg.properties.data_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + load_balance_method: typing.Union[MetaOapg.properties.load_balance_method, None, str, schemas.Unset] = schemas.unset, + expert_parallel_size: typing.Union[MetaOapg.properties.expert_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + dist_init_addr: typing.Union[MetaOapg.properties.dist_init_addr, None, str, schemas.Unset] = schemas.unset, + nnodes: typing.Union[MetaOapg.properties.nnodes, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + node_rank: typing.Union[MetaOapg.properties.node_rank, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + json_model_override_args: typing.Union[MetaOapg.properties.json_model_override_args, None, str, schemas.Unset] = schemas.unset, + lora_paths: typing.Union[MetaOapg.properties.lora_paths, list, tuple, None, schemas.Unset] = schemas.unset, + max_loras_per_batch: 
typing.Union[MetaOapg.properties.max_loras_per_batch, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, + sampling_backend: typing.Union[MetaOapg.properties.sampling_backend, None, str, schemas.Unset] = schemas.unset, + grammar_backend: typing.Union[MetaOapg.properties.grammar_backend, None, str, schemas.Unset] = schemas.unset, + speculative_algorithm: typing.Union[MetaOapg.properties.speculative_algorithm, None, str, schemas.Unset] = schemas.unset, + speculative_draft_model_path: typing.Union[MetaOapg.properties.speculative_draft_model_path, None, str, schemas.Unset] = schemas.unset, + speculative_num_steps: typing.Union[MetaOapg.properties.speculative_num_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + speculative_num_draft_tokens: typing.Union[MetaOapg.properties.speculative_num_draft_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + speculative_eagle_topk: typing.Union[MetaOapg.properties.speculative_eagle_topk, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enable_double_sparsity: typing.Union[MetaOapg.properties.enable_double_sparsity, None, bool, schemas.Unset] = schemas.unset, + ds_channel_config_path: typing.Union[MetaOapg.properties.ds_channel_config_path, None, str, schemas.Unset] = schemas.unset, + ds_heavy_channel_num: typing.Union[MetaOapg.properties.ds_heavy_channel_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + ds_heavy_token_num: typing.Union[MetaOapg.properties.ds_heavy_token_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + ds_heavy_channel_type: typing.Union[MetaOapg.properties.ds_heavy_channel_type, None, str, schemas.Unset] = schemas.unset, + ds_sparse_decode_threshold: typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + disable_radix_cache: 
typing.Union[MetaOapg.properties.disable_radix_cache, None, bool, schemas.Unset] = schemas.unset, + disable_jump_forward: typing.Union[MetaOapg.properties.disable_jump_forward, None, bool, schemas.Unset] = schemas.unset, + disable_cuda_graph: typing.Union[MetaOapg.properties.disable_cuda_graph, None, bool, schemas.Unset] = schemas.unset, + disable_cuda_graph_padding: typing.Union[MetaOapg.properties.disable_cuda_graph_padding, None, bool, schemas.Unset] = schemas.unset, + disable_outlines_disk_cache: typing.Union[MetaOapg.properties.disable_outlines_disk_cache, None, bool, schemas.Unset] = schemas.unset, + disable_custom_all_reduce: typing.Union[MetaOapg.properties.disable_custom_all_reduce, None, bool, schemas.Unset] = schemas.unset, + disable_mla: typing.Union[MetaOapg.properties.disable_mla, None, bool, schemas.Unset] = schemas.unset, + disable_overlap_schedule: typing.Union[MetaOapg.properties.disable_overlap_schedule, None, bool, schemas.Unset] = schemas.unset, + enable_mixed_chunk: typing.Union[MetaOapg.properties.enable_mixed_chunk, None, bool, schemas.Unset] = schemas.unset, + enable_dp_attention: typing.Union[MetaOapg.properties.enable_dp_attention, None, bool, schemas.Unset] = schemas.unset, + enable_ep_moe: typing.Union[MetaOapg.properties.enable_ep_moe, None, bool, schemas.Unset] = schemas.unset, + enable_torch_compile: typing.Union[MetaOapg.properties.enable_torch_compile, None, bool, schemas.Unset] = schemas.unset, + torch_compile_max_bs: typing.Union[MetaOapg.properties.torch_compile_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + cuda_graph_max_bs: typing.Union[MetaOapg.properties.cuda_graph_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + cuda_graph_bs: typing.Union[MetaOapg.properties.cuda_graph_bs, list, tuple, None, schemas.Unset] = schemas.unset, + torchao_config: typing.Union[MetaOapg.properties.torchao_config, None, str, schemas.Unset] = schemas.unset, + enable_nan_detection: 
typing.Union[MetaOapg.properties.enable_nan_detection, None, bool, schemas.Unset] = schemas.unset, + enable_p2p_check: typing.Union[MetaOapg.properties.enable_p2p_check, None, bool, schemas.Unset] = schemas.unset, + triton_attention_reduce_in_fp32: typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, None, bool, schemas.Unset] = schemas.unset, + triton_attention_num_kv_splits: typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + num_continuous_decode_steps: typing.Union[MetaOapg.properties.num_continuous_decode_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + delete_ckpt_after_loading: typing.Union[MetaOapg.properties.delete_ckpt_after_loading, None, bool, schemas.Unset] = schemas.unset, + enable_memory_saver: typing.Union[MetaOapg.properties.enable_memory_saver, None, bool, schemas.Unset] = schemas.unset, + allow_auto_truncate: typing.Union[MetaOapg.properties.allow_auto_truncate, None, bool, schemas.Unset] = schemas.unset, + enable_custom_logit_processor: typing.Union[MetaOapg.properties.enable_custom_logit_processor, None, bool, schemas.Unset] = schemas.unset, + tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, + huggingface_repo: typing.Union[MetaOapg.properties.huggingface_repo, None, str, schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateSGLangModelEndpointRequest': + return super().__new__( + cls, + *_args, + metadata=metadata, + model_name=model_name, + max_workers=max_workers, + min_workers=min_workers, + name=name, + per_worker=per_worker, + labels=labels, 
+ quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + source=source, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + endpoint_type=endpoint_type, + trust_remote_code=trust_remote_code, + tp_size=tp_size, + skip_tokenizer_init=skip_tokenizer_init, + load_format=load_format, + dtype=dtype, + kv_cache_dtype=kv_cache_dtype, + quantization_param_path=quantization_param_path, + quantization=quantization, + context_length=context_length, + device=device, + served_model_name=served_model_name, + chat_template=chat_template, + is_embedding=is_embedding, + revision=revision, + mem_fraction_static=mem_fraction_static, + max_running_requests=max_running_requests, + max_total_tokens=max_total_tokens, + chunked_prefill_size=chunked_prefill_size, + max_prefill_tokens=max_prefill_tokens, + schedule_policy=schedule_policy, + schedule_conservativeness=schedule_conservativeness, + cpu_offload_gb=cpu_offload_gb, + prefill_only_one_req=prefill_only_one_req, + stream_interval=stream_interval, + random_seed=random_seed, + constrained_json_whitespace_pattern=constrained_json_whitespace_pattern, + watchdog_timeout=watchdog_timeout, + download_dir=download_dir, + base_gpu_id=base_gpu_id, + log_level=log_level, + log_level_http=log_level_http, + log_requests=log_requests, + show_time_cost=show_time_cost, + enable_metrics=enable_metrics, + decode_log_interval=decode_log_interval, + api_key=api_key, + file_storage_pth=file_storage_pth, + 
enable_cache_report=enable_cache_report, + data_parallel_size=data_parallel_size, + load_balance_method=load_balance_method, + expert_parallel_size=expert_parallel_size, + dist_init_addr=dist_init_addr, + nnodes=nnodes, + node_rank=node_rank, + json_model_override_args=json_model_override_args, + lora_paths=lora_paths, + max_loras_per_batch=max_loras_per_batch, + attention_backend=attention_backend, + sampling_backend=sampling_backend, + grammar_backend=grammar_backend, + speculative_algorithm=speculative_algorithm, + speculative_draft_model_path=speculative_draft_model_path, + speculative_num_steps=speculative_num_steps, + speculative_num_draft_tokens=speculative_num_draft_tokens, + speculative_eagle_topk=speculative_eagle_topk, + enable_double_sparsity=enable_double_sparsity, + ds_channel_config_path=ds_channel_config_path, + ds_heavy_channel_num=ds_heavy_channel_num, + ds_heavy_token_num=ds_heavy_token_num, + ds_heavy_channel_type=ds_heavy_channel_type, + ds_sparse_decode_threshold=ds_sparse_decode_threshold, + disable_radix_cache=disable_radix_cache, + disable_jump_forward=disable_jump_forward, + disable_cuda_graph=disable_cuda_graph, + disable_cuda_graph_padding=disable_cuda_graph_padding, + disable_outlines_disk_cache=disable_outlines_disk_cache, + disable_custom_all_reduce=disable_custom_all_reduce, + disable_mla=disable_mla, + disable_overlap_schedule=disable_overlap_schedule, + enable_mixed_chunk=enable_mixed_chunk, + enable_dp_attention=enable_dp_attention, + enable_ep_moe=enable_ep_moe, + enable_torch_compile=enable_torch_compile, + torch_compile_max_bs=torch_compile_max_bs, + cuda_graph_max_bs=cuda_graph_max_bs, + cuda_graph_bs=cuda_graph_bs, + torchao_config=torchao_config, + enable_nan_detection=enable_nan_detection, + enable_p2p_check=enable_p2p_check, + triton_attention_reduce_in_fp32=triton_attention_reduce_in_fp32, + triton_attention_num_kv_splits=triton_attention_num_kv_splits, + num_continuous_decode_steps=num_continuous_decode_steps, + 
delete_ckpt_after_loading=delete_ckpt_after_loading, + enable_memory_saver=enable_memory_saver, + allow_auto_truncate=allow_auto_truncate, + enable_custom_logit_processor=enable_custom_logit_processor, + tool_call_parser=tool_call_parser, + huggingface_repo=huggingface_repo, + inference_framework=inference_framework, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py b/launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py new file mode 100644 index 00000000..c1fa53b7 --- /dev/null +++ b/launch/api_client/model/create_tensor_rtllm_model_endpoint_request.py @@ -0,0 +1,842 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateTensorRTLLMModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "metadata", + "model_name", + "max_workers", + "min_workers", + "name", + "per_worker", + "labels", + } + + class properties: + name = schemas.StrSchema + model_name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + min_workers = schemas.IntSchema + max_workers = schemas.IntSchema + per_worker = schemas.IntSchema + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return 
Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + inference_framework_image_tag = schemas.StrSchema + num_shards = schemas.IntSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "tensorrt_llm": "TENSORRT_LLM", + } + + @schemas.classproperty + def TENSORRT_LLM(cls): + return cls("tensorrt_llm") + __annotations__ = { + "name": name, + "model_name": model_name, + "metadata": metadata, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + 
"chat_template_override": chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "source": source, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "endpoint_type": endpoint_type, + "inference_framework": inference_framework, + } + + metadata: MetaOapg.properties.metadata + model_name: MetaOapg.properties.model_name + max_workers: MetaOapg.properties.max_workers + min_workers: MetaOapg.properties.min_workers + name: MetaOapg.properties.name + per_worker: MetaOapg.properties.per_worker + labels: MetaOapg.properties.labels + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: 
typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = 
schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, + endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateTensorRTLLMModelEndpointRequest': + return super().__new__( + cls, + *_args, + metadata=metadata, + model_name=model_name, + max_workers=max_workers, + min_workers=min_workers, + name=name, + per_worker=per_worker, + labels=labels, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + source=source, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + endpoint_type=endpoint_type, + inference_framework=inference_framework, + 
_configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_text_generation_inference_model_endpoint_request.py b/launch/api_client/model/create_text_generation_inference_model_endpoint_request.py new file mode 100644 index 00000000..b6aa42de --- /dev/null +++ b/launch/api_client/model/create_text_generation_inference_model_endpoint_request.py @@ -0,0 +1,842 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateTextGenerationInferenceModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "metadata", + "model_name", + "max_workers", + "min_workers", + "name", + "per_worker", + "labels", + } + + class properties: + name = schemas.StrSchema + model_name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + min_workers = schemas.IntSchema + max_workers = schemas.IntSchema + per_worker = schemas.IntSchema + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return 
Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + inference_framework_image_tag = schemas.StrSchema + num_shards = schemas.IntSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "text_generation_inference": "TEXT_GENERATION_INFERENCE", + } + + @schemas.classproperty + def TEXT_GENERATION_INFERENCE(cls): + return cls("text_generation_inference") + __annotations__ = { + "name": name, + "model_name": model_name, + "metadata": metadata, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + 
"public_inference": public_inference, + "chat_template_override": chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "source": source, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "endpoint_type": endpoint_type, + "inference_framework": inference_framework, + } + + metadata: MetaOapg.properties.metadata + model_name: MetaOapg.properties.model_name + max_workers: MetaOapg.properties.max_workers + min_workers: MetaOapg.properties.min_workers + name: MetaOapg.properties.name + per_worker: MetaOapg.properties.per_worker + labels: MetaOapg.properties.labels + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "inference_framework", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: 
typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = 
schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, + endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateTextGenerationInferenceModelEndpointRequest': + return super().__new__( + cls, + *_args, + metadata=metadata, + model_name=model_name, + max_workers=max_workers, + min_workers=min_workers, + name=name, + per_worker=per_worker, + labels=labels, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + source=source, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + endpoint_type=endpoint_type, + 
inference_framework=inference_framework, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/create_trigger_v1_request.py b/launch/api_client/model/create_trigger_v1_request.py index f11e41ba..93e7db58 100644 --- a/launch/api_client/model/create_trigger_v1_request.py +++ b/launch/api_client/model/create_trigger_v1_request.py @@ -23,203 +23,161 @@ from launch.api_client import schemas # noqa: F401 -class CreateTriggerV1Request(schemas.DictSchema): +class CreateTriggerV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "cron_schedule", "bundle_id", "name", } - + class properties: - bundle_id = schemas.StrSchema - cron_schedule = schemas.StrSchema name = schemas.StrSchema - default_job_config = schemas.DictSchema - - class default_job_metadata(schemas.DictSchema): + cron_schedule = schemas.StrSchema + bundle_id = schemas.StrSchema + + + class default_job_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: 
typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'default_job_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_job_metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "default_job_metadata": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'default_job_metadata': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - __annotations__ = { - "bundle_id": bundle_id, - "cron_schedule": cron_schedule, "name": name, + "cron_schedule": cron_schedule, + "bundle_id": bundle_id, "default_job_config": default_job_config, "default_job_metadata": default_job_metadata, } - + cron_schedule: MetaOapg.properties.cron_schedule bundle_id: MetaOapg.properties.bundle_id name: MetaOapg.properties.name - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: - ... - + def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_config"] - ) -> MetaOapg.properties.default_job_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_job_config"]) -> MetaOapg.properties.default_job_config: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> MetaOapg.properties.default_job_metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_job_metadata"]) -> MetaOapg.properties.default_job_metadata: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_id", - "cron_schedule", - "name", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "cron_schedule", "bundle_id", "default_job_config", "default_job_metadata", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_config"] - ) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_job_config"]) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_job_metadata"]) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_id", - "cron_schedule", - "name", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "cron_schedule", "bundle_id", "default_job_config", "default_job_metadata", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cron_schedule: typing.Union[ - MetaOapg.properties.cron_schedule, - str, - ], - bundle_id: typing.Union[ - MetaOapg.properties.bundle_id, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - default_job_config: typing.Union[ - MetaOapg.properties.default_job_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - default_job_metadata: typing.Union[ - MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, str, ], + bundle_id: typing.Union[MetaOapg.properties.bundle_id, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + default_job_config: typing.Union[MetaOapg.properties.default_job_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_job_metadata: typing.Union[MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateTriggerV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateTriggerV1Request': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_trigger_v1_request.pyi b/launch/api_client/model/create_trigger_v1_request.pyi deleted file mode 100644 
index d7b7697c..00000000 --- a/launch/api_client/model/create_trigger_v1_request.pyi +++ /dev/null @@ -1,200 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateTriggerV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "cron_schedule", - "bundle_id", - "name", - } - - class properties: - bundle_id = schemas.StrSchema - cron_schedule = schemas.StrSchema - name = schemas.StrSchema - default_job_config = schemas.DictSchema - - class default_job_metadata(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "default_job_metadata": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "bundle_id": bundle_id, - "cron_schedule": cron_schedule, - "name": name, - 
"default_job_config": default_job_config, - "default_job_metadata": default_job_metadata, - } - cron_schedule: MetaOapg.properties.cron_schedule - bundle_id: MetaOapg.properties.bundle_id - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_config"] - ) -> MetaOapg.properties.default_job_config: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> MetaOapg.properties.default_job_metadata: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_id", - "cron_schedule", - "name", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bundle_id"]) -> MetaOapg.properties.bundle_id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_config"] - ) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_id", - "cron_schedule", - "name", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cron_schedule: typing.Union[ - MetaOapg.properties.cron_schedule, - str, - ], - bundle_id: typing.Union[ - MetaOapg.properties.bundle_id, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - default_job_config: typing.Union[ - MetaOapg.properties.default_job_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - default_job_metadata: typing.Union[ - MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateTriggerV1Request": - return super().__new__( - cls, - *_args, - cron_schedule=cron_schedule, - bundle_id=bundle_id, - name=name, - default_job_config=default_job_config, - default_job_metadata=default_job_metadata, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_trigger_v1_response.py b/launch/api_client/model/create_trigger_v1_response.py index fb2e5437..7ce0aad3 100644 --- a/launch/api_client/model/create_trigger_v1_response.py +++ b/launch/api_client/model/create_trigger_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas 
# noqa: F401 -class CreateTriggerV1Response(schemas.DictSchema): +class CreateTriggerV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "trigger_id", } - + class properties: trigger_id = schemas.StrSchema __annotations__ = { "trigger_id": trigger_id, } - + trigger_id: MetaOapg.properties.trigger_id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["trigger_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["trigger_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["trigger_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["trigger_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - trigger_id: typing.Union[ - MetaOapg.properties.trigger_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + trigger_id: typing.Union[MetaOapg.properties.trigger_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateTriggerV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateTriggerV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/create_trigger_v1_response.pyi b/launch/api_client/model/create_trigger_v1_response.pyi deleted file mode 100644 index 9c2409b4..00000000 --- a/launch/api_client/model/create_trigger_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CreateTriggerV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "trigger_id", - } - - class properties: - trigger_id = schemas.StrSchema - __annotations__ = { - "trigger_id": trigger_id, - } - trigger_id: MetaOapg.properties.trigger_id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["trigger_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["trigger_id"]) -> MetaOapg.properties.trigger_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["trigger_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - trigger_id: typing.Union[ - MetaOapg.properties.trigger_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateTriggerV1Response": - return super().__new__( - cls, - *_args, - trigger_id=trigger_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/create_vllm_model_endpoint_request.py b/launch/api_client/model/create_vllm_model_endpoint_request.py new file mode 100644 index 00000000..c3d4cfbc --- /dev/null +++ b/launch/api_client/model/create_vllm_model_endpoint_request.py @@ -0,0 +1,1983 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class CreateVLLMModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "metadata", + "model_name", + "max_workers", + "min_workers", + "name", + "per_worker", + "labels", + } + + class properties: + name = schemas.StrSchema + model_name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + min_workers = schemas.IntSchema + max_workers = schemas.IntSchema + per_worker = schemas.IntSchema + + + class labels( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties 
= schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + inference_framework_image_tag = schemas.StrSchema + num_shards = schemas.IntSchema + + @staticmethod + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + + + class max_gpu_memory_utilization( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_gpu_memory_utilization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class attention_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'attention_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_model_len( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'max_model_len': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_seqs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_seqs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enforce_eager( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enforce_eager': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class trust_remote_code( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'trust_remote_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class pipeline_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'pipeline_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tensor_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tensor_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class 
quantization( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_log_requests( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_log_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tool_call_parser( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_call_parser': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_auto_tool_choice( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_auto_tool_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 
'load_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class config_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'config_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer_mode( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_mode': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class limit_mm_per_prompt( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'limit_mm_per_prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_batched_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_batched_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class code_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'code_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class rope_scaling( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, 
list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'rope_scaling': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class tokenizer_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization_param_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization_param_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_seq_len_to_capture( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_seq_len_to_capture': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_sliding_window( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_sliding_window': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_tokenizer_init( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_tokenizer_init': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + 
class served_model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'served_model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class override_neuron_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'override_neuron_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class mm_processor_kwargs( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: 
typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'mm_processor_kwargs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class block_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'block_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class gpu_memory_utilization( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpu_memory_utilization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class swap_space( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'swap_space': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cache_dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cache_dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_gpu_blocks_override( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + 
_configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_gpu_blocks_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_prefix_caching( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_prefix_caching': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "vllm": "VLLM", + } + + @schemas.classproperty + def VLLM(cls): + return cls("vllm") + __annotations__ = { + "name": name, + "model_name": model_name, + "metadata": metadata, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "source": source, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "endpoint_type": endpoint_type, + "max_gpu_memory_utilization": max_gpu_memory_utilization, + "attention_backend": attention_backend, + "max_model_len": max_model_len, + "max_num_seqs": max_num_seqs, + "enforce_eager": enforce_eager, + "trust_remote_code": trust_remote_code, + "pipeline_parallel_size": 
pipeline_parallel_size, + "tensor_parallel_size": tensor_parallel_size, + "quantization": quantization, + "disable_log_requests": disable_log_requests, + "chat_template": chat_template, + "tool_call_parser": tool_call_parser, + "enable_auto_tool_choice": enable_auto_tool_choice, + "load_format": load_format, + "config_format": config_format, + "tokenizer_mode": tokenizer_mode, + "limit_mm_per_prompt": limit_mm_per_prompt, + "max_num_batched_tokens": max_num_batched_tokens, + "tokenizer": tokenizer, + "dtype": dtype, + "seed": seed, + "revision": revision, + "code_revision": code_revision, + "rope_scaling": rope_scaling, + "tokenizer_revision": tokenizer_revision, + "quantization_param_path": quantization_param_path, + "max_seq_len_to_capture": max_seq_len_to_capture, + "disable_sliding_window": disable_sliding_window, + "skip_tokenizer_init": skip_tokenizer_init, + "served_model_name": served_model_name, + "override_neuron_config": override_neuron_config, + "mm_processor_kwargs": mm_processor_kwargs, + "block_size": block_size, + "gpu_memory_utilization": gpu_memory_utilization, + "swap_space": swap_space, + "cache_dtype": cache_dtype, + "num_gpu_blocks_override": num_gpu_blocks_override, + "enable_prefix_caching": enable_prefix_caching, + "inference_framework": inference_framework, + } + + metadata: MetaOapg.properties.metadata + model_name: MetaOapg.properties.model_name + max_workers: MetaOapg.properties.max_workers + min_workers: MetaOapg.properties.min_workers + name: MetaOapg.properties.name + per_worker: MetaOapg.properties.per_worker + labels: MetaOapg.properties.labels + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> MetaOapg.properties.max_gpu_memory_utilization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "inference_framework", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> typing.Union['ModelEndpointType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.max_gpu_memory_utilization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "model_name", "metadata", "min_workers", "max_workers", "per_worker", "labels", "quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "source", "inference_framework_image_tag", "num_shards", "endpoint_type", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", "inference_framework", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + name: typing.Union[MetaOapg.properties.name, str, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + labels: typing.Union[MetaOapg.properties.labels, dict, 
frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = 
schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, + endpoint_type: typing.Union['ModelEndpointType', schemas.Unset] = schemas.unset, + max_gpu_memory_utilization: typing.Union[MetaOapg.properties.max_gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, + max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, None, bool, schemas.Unset] = schemas.unset, + trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, + pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, + disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, 
None, bool, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, + enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, + load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, + config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, + tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, + limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, + max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, + dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, + code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, + rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + tokenizer_revision: typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, + quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, + max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = 
schemas.unset, + disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, + skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, + served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, + override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, + num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enable_prefix_caching: typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CreateVLLMModelEndpointRequest': + return super().__new__( + cls, + *_args, + metadata=metadata, + model_name=model_name, + max_workers=max_workers, + min_workers=min_workers, + name=name, + 
per_worker=per_worker, + labels=labels, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + source=source, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + endpoint_type=endpoint_type, + max_gpu_memory_utilization=max_gpu_memory_utilization, + attention_backend=attention_backend, + max_model_len=max_model_len, + max_num_seqs=max_num_seqs, + enforce_eager=enforce_eager, + trust_remote_code=trust_remote_code, + pipeline_parallel_size=pipeline_parallel_size, + tensor_parallel_size=tensor_parallel_size, + quantization=quantization, + disable_log_requests=disable_log_requests, + chat_template=chat_template, + tool_call_parser=tool_call_parser, + enable_auto_tool_choice=enable_auto_tool_choice, + load_format=load_format, + config_format=config_format, + tokenizer_mode=tokenizer_mode, + limit_mm_per_prompt=limit_mm_per_prompt, + max_num_batched_tokens=max_num_batched_tokens, + tokenizer=tokenizer, + dtype=dtype, + seed=seed, + revision=revision, + code_revision=code_revision, + rope_scaling=rope_scaling, + tokenizer_revision=tokenizer_revision, + quantization_param_path=quantization_param_path, + max_seq_len_to_capture=max_seq_len_to_capture, + disable_sliding_window=disable_sliding_window, + skip_tokenizer_init=skip_tokenizer_init, + served_model_name=served_model_name, + override_neuron_config=override_neuron_config, + mm_processor_kwargs=mm_processor_kwargs, + block_size=block_size, + 
gpu_memory_utilization=gpu_memory_utilization, + swap_space=swap_space, + cache_dtype=cache_dtype, + num_gpu_blocks_override=num_gpu_blocks_override, + enable_prefix_caching=enable_prefix_caching, + inference_framework=inference_framework, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/custom_framework.py b/launch/api_client/model/custom_framework.py index cf37e753..6af59b6d 100644 --- a/launch/api_client/model/custom_framework.py +++ b/launch/api_client/model/custom_framework.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class CustomFramework(schemas.DictSchema): +class CustomFramework( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,24 +34,31 @@ class CustomFramework(schemas.DictSchema): This is the entity-layer class for a custom framework specification. 
""" + class MetaOapg: required = { "image_repository", "framework_type", "image_tag", } - + class properties: - class framework_type(schemas.EnumBase, schemas.StrSchema): + + + class framework_type( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { "custom_base_image": "CUSTOM_BASE_IMAGE", } - + @schemas.classproperty def CUSTOM_BASE_IMAGE(cls): return cls("custom_base_image") - image_repository = schemas.StrSchema image_tag = schemas.StrSchema __annotations__ = { @@ -57,108 +66,53 @@ def CUSTOM_BASE_IMAGE(cls): "image_repository": image_repository, "image_tag": image_tag, } - + image_repository: MetaOapg.properties.image_repository framework_type: MetaOapg.properties.framework_type image_tag: MetaOapg.properties.image_tag - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: - ... - + def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "image_repository", - "image_tag", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "image_repository", "image_tag", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "image_repository", - "image_tag", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "image_repository", "image_tag", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - image_repository: typing.Union[ - MetaOapg.properties.image_repository, - str, - ], - framework_type: typing.Union[ - MetaOapg.properties.framework_type, - str, - ], - image_tag: typing.Union[ - MetaOapg.properties.image_tag, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + image_repository: typing.Union[MetaOapg.properties.image_repository, str, ], + framework_type: typing.Union[MetaOapg.properties.framework_type, str, ], + image_tag: typing.Union[MetaOapg.properties.image_tag, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CustomFramework": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'CustomFramework': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/custom_framework.pyi b/launch/api_client/model/custom_framework.pyi deleted file mode 100644 index 0ccc9317..00000000 --- a/launch/api_client/model/custom_framework.pyi +++ /dev/null @@ -1,147 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import 
frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class CustomFramework(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a custom framework specification. - """ - - class MetaOapg: - required = { - "image_repository", - "framework_type", - "image_tag", - } - - class properties: - class framework_type(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def CUSTOM_BASE_IMAGE(cls): - return cls("custom_base_image") - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "image_repository": image_repository, - "image_tag": image_tag, - } - image_repository: MetaOapg.properties.image_repository - framework_type: MetaOapg.properties.framework_type - image_tag: MetaOapg.properties.image_tag - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "image_repository", - "image_tag", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["framework_type"] - ) -> MetaOapg.properties.framework_type: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "image_repository", - "image_tag", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - image_repository: typing.Union[ - MetaOapg.properties.image_repository, - str, - ], - framework_type: typing.Union[ - MetaOapg.properties.framework_type, - str, - ], - image_tag: typing.Union[ - MetaOapg.properties.image_tag, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CustomFramework": - return super().__new__( - cls, - *_args, - image_repository=image_repository, - framework_type=framework_type, - image_tag=image_tag, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_file_response.py b/launch/api_client/model/delete_file_response.py index 2a9a3d53..eb5a0fab 100644 --- a/launch/api_client/model/delete_file_response.py +++ b/launch/api_client/model/delete_file_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class DeleteFileResponse(schemas.DictSchema): +class DeleteFileResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -32,82 +34,48 @@ class DeleteFileResponse(schemas.DictSchema): Response object for deleting a file. """ + class MetaOapg: required = { "deleted", } - + class properties: deleted = schemas.BoolSchema __annotations__ = { "deleted": deleted, } - + deleted: MetaOapg.properties.deleted - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: - ... - + def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - deleted: typing.Union[ - MetaOapg.properties.deleted, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + deleted: typing.Union[MetaOapg.properties.deleted, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteFileResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'DeleteFileResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/delete_llm_endpoint_response.py b/launch/api_client/model/delete_llm_endpoint_response.py index 9cdb62f9..c3a2a8c5 100644 --- a/launch/api_client/model/delete_llm_endpoint_response.py +++ b/launch/api_client/model/delete_llm_endpoint_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class DeleteLLMEndpointResponse(schemas.DictSchema): +class DeleteLLMEndpointResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "deleted", } - + class properties: deleted = schemas.BoolSchema __annotations__ = { "deleted": deleted, } - + deleted: MetaOapg.properties.deleted - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: - ... - + def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... 
+ @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - deleted: typing.Union[ - MetaOapg.properties.deleted, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + deleted: typing.Union[MetaOapg.properties.deleted, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteLLMEndpointResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'DeleteLLMEndpointResponse': return super().__new__( cls, *_args, diff --git 
a/launch/api_client/model/delete_llm_endpoint_response.pyi b/launch/api_client/model/delete_llm_endpoint_response.pyi deleted file mode 100644 index f723efba..00000000 --- a/launch/api_client/model/delete_llm_endpoint_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class DeleteLLMEndpointResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "deleted", - } - - class properties: - deleted = schemas.BoolSchema - __annotations__ = { - "deleted": deleted, - } - deleted: MetaOapg.properties.deleted - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - deleted: typing.Union[ - MetaOapg.properties.deleted, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteLLMEndpointResponse": - return super().__new__( - cls, - *_args, - deleted=deleted, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_model_endpoint_v1_response.py b/launch/api_client/model/delete_model_endpoint_v1_response.py index c6f2aaaf..1c9c6587 100644 --- a/launch/api_client/model/delete_model_endpoint_v1_response.py +++ b/launch/api_client/model/delete_model_endpoint_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class DeleteModelEndpointV1Response(schemas.DictSchema): +class DeleteModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "deleted", } - + class properties: deleted = schemas.BoolSchema __annotations__ = { "deleted": deleted, } - + deleted: MetaOapg.properties.deleted - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: - ... - + def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["deleted", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - deleted: typing.Union[ - MetaOapg.properties.deleted, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + deleted: typing.Union[MetaOapg.properties.deleted, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'DeleteModelEndpointV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/delete_model_endpoint_v1_response.pyi b/launch/api_client/model/delete_model_endpoint_v1_response.pyi deleted file 
mode 100644 index e5ca73aa..00000000 --- a/launch/api_client/model/delete_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class DeleteModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "deleted", - } - - class properties: - deleted = schemas.BoolSchema - __annotations__ = { - "deleted": deleted, - } - deleted: MetaOapg.properties.deleted - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - deleted: typing.Union[ - MetaOapg.properties.deleted, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteModelEndpointV1Response": - return super().__new__( - cls, - *_args, - deleted=deleted, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/delete_trigger_v1_response.py b/launch/api_client/model/delete_trigger_v1_response.py index 102d60b4..c313ec99 100644 --- a/launch/api_client/model/delete_trigger_v1_response.py +++ b/launch/api_client/model/delete_trigger_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class DeleteTriggerV1Response(schemas.DictSchema): +class DeleteTriggerV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "success", } - + class properties: success = schemas.BoolSchema __annotations__ = { "success": success, } - + success: MetaOapg.properties.success - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + success: typing.Union[MetaOapg.properties.success, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteTriggerV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'DeleteTriggerV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/delete_trigger_v1_response.pyi b/launch/api_client/model/delete_trigger_v1_response.pyi deleted file mode 100644 index c6ca7b86..00000000 --- a/launch/api_client/model/delete_trigger_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class DeleteTriggerV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteTriggerV1Response": - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/docker_image_batch_job.py b/launch/api_client/model/docker_image_batch_job.py index 60b9cea5..66d52246 100644 --- a/launch/api_client/model/docker_image_batch_job.py +++ b/launch/api_client/model/docker_image_batch_job.py @@ -23,16 +23,19 @@ from launch.api_client import schemas # noqa: F401 -class DockerImageBatchJob(schemas.DictSchema): +class DockerImageBatchJob( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech + Ref: https://openapi-generator.tech - Do not edit the class manually. + Do not edit the class manually. - This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job - created via the "supply a docker image for a k8s job" API. + This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job +created via the "supply a docker image for a k8s job" API. 
""" + class MetaOapg: required = { "owner", @@ -41,253 +44,216 @@ class MetaOapg: "created_by", "status", } - + class properties: - created_at = schemas.DateTimeSchema - created_by = schemas.StrSchema id = schemas.StrSchema + created_by = schemas.StrSchema owner = schemas.StrSchema - + created_at = schemas.DateTimeSchema + @staticmethod - def status() -> typing.Type["BatchJobStatus"]: + def status() -> typing.Type['BatchJobStatus']: return BatchJobStatus - - class annotations(schemas.DictSchema): + + + class completed_at( + schemas.DateTimeBase, + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + class MetaOapg: + format = 'date-time' + + + def __new__( + cls, + *_args: typing.Union[None, str, datetime, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'completed_at': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class annotations( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "annotations": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'annotations': return super().__new__( cls, 
*_args, _configuration=_configuration, **kwargs, ) - - completed_at = schemas.DateTimeSchema - num_workers = schemas.IntSchema - override_job_max_runtime_s = schemas.IntSchema + + + class override_job_max_runtime_s( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'override_job_max_runtime_s': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "created_at": created_at, - "created_by": created_by, "id": id, + "created_by": created_by, "owner": owner, + "created_at": created_at, "status": status, - "annotations": annotations, "completed_at": completed_at, - "num_workers": num_workers, + "annotations": annotations, "override_job_max_runtime_s": override_job_max_runtime_s, + "num_workers": num_workers, } - + owner: MetaOapg.properties.owner created_at: MetaOapg.properties.created_at id: MetaOapg.properties.id created_by: MetaOapg.properties.created_by - status: "BatchJobStatus" - + status: 'BatchJobStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["annotations"]) -> MetaOapg.properties.annotations: - ... - + def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["annotations"]) -> MetaOapg.properties.annotations: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_workers"]) -> MetaOapg.properties.num_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> MetaOapg.properties.override_job_max_runtime_s: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> MetaOapg.properties.override_job_max_runtime_s: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_workers"]) -> MetaOapg.properties.num_workers: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "id", - "owner", - "status", - "annotations", - "completed_at", - "num_workers", - "override_job_max_runtime_s", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "created_by", "owner", "created_at", "status", "completed_at", "annotations", "override_job_max_runtime_s", "num_workers", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["annotations"] - ) -> typing.Union[MetaOapg.properties.annotations, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["completed_at"]) -> typing.Union[MetaOapg.properties.completed_at, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["completed_at"] - ) -> typing.Union[MetaOapg.properties.completed_at, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["annotations"]) -> typing.Union[MetaOapg.properties.annotations, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_workers"] - ) -> typing.Union[MetaOapg.properties.num_workers, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["override_job_max_runtime_s"]) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_workers"]) -> typing.Union[MetaOapg.properties.num_workers, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "id", - "owner", - "status", - "annotations", - "completed_at", - "num_workers", - "override_job_max_runtime_s", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "created_by", "owner", "created_at", "status", "completed_at", "annotations", "override_job_max_runtime_s", "num_workers", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - owner: typing.Union[ - MetaOapg.properties.owner, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - created_by: typing.Union[ - MetaOapg.properties.created_by, - str, - ], - status: "BatchJobStatus", - annotations: typing.Union[ - MetaOapg.properties.annotations, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - completed_at: typing.Union[MetaOapg.properties.completed_at, str, datetime, schemas.Unset] = schemas.unset, - num_workers: typing.Union[MetaOapg.properties.num_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - override_job_max_runtime_s: typing.Union[ - MetaOapg.properties.override_job_max_runtime_s, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + owner: typing.Union[MetaOapg.properties.owner, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], + id: typing.Union[MetaOapg.properties.id, str, ], + created_by: typing.Union[MetaOapg.properties.created_by, str, ], + status: 'BatchJobStatus', + completed_at: typing.Union[MetaOapg.properties.completed_at, None, str, datetime, schemas.Unset] = schemas.unset, + annotations: typing.Union[MetaOapg.properties.annotations, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + override_job_max_runtime_s: typing.Union[MetaOapg.properties.override_job_max_runtime_s, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + num_workers: typing.Union[MetaOapg.properties.num_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, 
_configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DockerImageBatchJob": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'DockerImageBatchJob': return super().__new__( cls, *_args, @@ -296,13 +262,12 @@ def __new__( id=id, created_by=created_by, status=status, - annotations=annotations, completed_at=completed_at, - num_workers=num_workers, + annotations=annotations, override_job_max_runtime_s=override_job_max_runtime_s, + num_workers=num_workers, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/docker_image_batch_job.pyi b/launch/api_client/model/docker_image_batch_job.pyi deleted file mode 100644 index 9a3303cc..00000000 --- a/launch/api_client/model/docker_image_batch_job.pyi +++ /dev/null @@ -1,258 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class DockerImageBatchJob(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- - This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job - created via the "supply a docker image for a k8s job" API. - """ - - class MetaOapg: - required = { - "owner", - "created_at", - "id", - "created_by", - "status", - } - - class properties: - created_at = schemas.DateTimeSchema - created_by = schemas.StrSchema - id = schemas.StrSchema - owner = schemas.StrSchema - - @staticmethod - def status() -> typing.Type["BatchJobStatus"]: - return BatchJobStatus - - class annotations(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "annotations": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - completed_at = schemas.DateTimeSchema - num_workers = schemas.IntSchema - override_job_max_runtime_s = schemas.IntSchema - __annotations__ = { - "created_at": created_at, - "created_by": created_by, - "id": id, - "owner": owner, - "status": status, - "annotations": annotations, - "completed_at": completed_at, - "num_workers": num_workers, - "override_job_max_runtime_s": override_job_max_runtime_s, - } - owner: MetaOapg.properties.owner - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - created_by: MetaOapg.properties.created_by - status: "BatchJobStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["annotations"]) -> MetaOapg.properties.annotations: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_workers"]) -> MetaOapg.properties.num_workers: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> MetaOapg.properties.override_job_max_runtime_s: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "id", - "owner", - "status", - "annotations", - "completed_at", - "num_workers", - "override_job_max_runtime_s", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... 
- @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["annotations"] - ) -> typing.Union[MetaOapg.properties.annotations, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["completed_at"] - ) -> typing.Union[MetaOapg.properties.completed_at, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_workers"] - ) -> typing.Union[MetaOapg.properties.num_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["override_job_max_runtime_s"] - ) -> typing.Union[MetaOapg.properties.override_job_max_runtime_s, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "id", - "owner", - "status", - "annotations", - "completed_at", - "num_workers", - "override_job_max_runtime_s", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - owner: typing.Union[ - MetaOapg.properties.owner, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - created_by: typing.Union[ - MetaOapg.properties.created_by, - str, - ], - status: "BatchJobStatus", - annotations: typing.Union[ - MetaOapg.properties.annotations, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - completed_at: typing.Union[MetaOapg.properties.completed_at, str, datetime, schemas.Unset] = schemas.unset, - num_workers: typing.Union[MetaOapg.properties.num_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - override_job_max_runtime_s: 
typing.Union[ - MetaOapg.properties.override_job_max_runtime_s, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DockerImageBatchJob": - return super().__new__( - cls, - *_args, - owner=owner, - created_at=created_at, - id=id, - created_by=created_by, - status=status, - annotations=annotations, - completed_at=completed_at, - num_workers=num_workers, - override_job_max_runtime_s=override_job_max_runtime_s, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py b/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py index 52c156cd..bebc5aaf 100644 --- a/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py +++ b/launch/api_client/model/docker_image_batch_job_bundle_v1_response.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class DockerImageBatchJobBundleV1Response(schemas.DictSchema): +class DockerImageBatchJobBundleV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "image_repository", @@ -40,105 +43,222 @@ class MetaOapg: "env", "command", } - + class properties: - class command(schemas.ListSchema): + id = schemas.StrSchema + name = schemas.StrSchema + created_at = schemas.DateTimeSchema + image_repository = schemas.StrSchema + image_tag = schemas.StrSchema + + + class command( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": + ) -> 'command': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - created_at = schemas.DateTimeSchema - - class env(schemas.DictSchema): + + + class env( + schemas.DictSchema + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'env': 
return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - id = schemas.StrSchema - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - name = schemas.StrSchema - cpus = schemas.StrSchema - gpu_type = schemas.StrSchema - gpus = schemas.IntSchema - memory = schemas.StrSchema - mount_location = schemas.StrSchema - public = schemas.BoolSchema - storage = schemas.StrSchema + + + class mount_location( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'mount_location': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class storage( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = 
None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class gpu_type( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpu_type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class public( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "command": command, - "created_at": created_at, - "env": env, "id": id, + "name": name, + "created_at": created_at, "image_repository": image_repository, "image_tag": image_tag, - "name": name, + "command": command, + "env": env, + "mount_location": mount_location, "cpus": cpus, - "gpu_type": gpu_type, - "gpus": gpus, "memory": memory, - "mount_location": mount_location, - "public": public, "storage": storage, + "gpus": gpus, + "gpu_type": gpu_type, + "public": public, } - + image_repository: MetaOapg.properties.image_repository name: MetaOapg.properties.name created_at: MetaOapg.properties.created_at @@ -146,254 +266,126 @@ def __new__( image_tag: MetaOapg.properties.image_tag env: MetaOapg.properties.env command: MetaOapg.properties.command - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> MetaOapg.properties.gpu_type: - ... - + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> MetaOapg.properties.gpu_type: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - + def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "created_at", - "env", - "id", - "image_repository", - "image_tag", - "name", - "cpus", - "gpu_type", - "gpus", - "memory", - "mount_location", - "public", - "storage", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "created_at", "image_repository", "image_tag", "command", "env", "mount_location", "cpus", "memory", "storage", "gpus", "gpu_type", "public", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_repository"]) -> MetaOapg.properties.image_repository: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["mount_location"]) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpu_type"] - ) -> typing.Union[MetaOapg.properties.gpu_type, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["mount_location"] - ) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public"] - ) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union[MetaOapg.properties.gpu_type, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["public"]) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "created_at", - "env", - "id", - "image_repository", - "image_tag", - "name", - "cpus", - "gpu_type", - "gpus", - "memory", - "mount_location", - "public", - "storage", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "created_at", "image_repository", "image_tag", "command", "env", "mount_location", "cpus", "memory", "storage", "gpus", "gpu_type", "public", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - image_repository: typing.Union[ - MetaOapg.properties.image_repository, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - image_tag: typing.Union[ - MetaOapg.properties.image_tag, - str, - ], - env: typing.Union[ - MetaOapg.properties.env, - dict, - frozendict.frozendict, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - cpus: typing.Union[MetaOapg.properties.cpus, str, schemas.Unset] = schemas.unset, - gpu_type: typing.Union[MetaOapg.properties.gpu_type, str, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[MetaOapg.properties.memory, str, schemas.Unset] = schemas.unset, - mount_location: typing.Union[MetaOapg.properties.mount_location, str, schemas.Unset] = schemas.unset, - public: typing.Union[MetaOapg.properties.public, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + image_repository: 
typing.Union[MetaOapg.properties.image_repository, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], + id: typing.Union[MetaOapg.properties.id, str, ], + image_tag: typing.Union[MetaOapg.properties.image_tag, str, ], + env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, ], + command: typing.Union[MetaOapg.properties.command, list, tuple, ], + mount_location: typing.Union[MetaOapg.properties.mount_location, None, str, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, None, str, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, None, str, schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, None, str, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_type: typing.Union[MetaOapg.properties.gpu_type, None, str, schemas.Unset] = schemas.unset, + public: typing.Union[MetaOapg.properties.public, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DockerImageBatchJobBundleV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'DockerImageBatchJobBundleV1Response': return super().__new__( cls, *_args, @@ -404,13 +396,13 @@ def __new__( image_tag=image_tag, env=env, command=command, + mount_location=mount_location, cpus=cpus, - gpu_type=gpu_type, - gpus=gpus, memory=memory, - mount_location=mount_location, - public=public, storage=storage, + gpus=gpus, + gpu_type=gpu_type, + public=public, 
_configuration=_configuration, **kwargs, ) diff --git a/launch/api_client/model/docker_image_batch_job_bundle_v1_response.pyi b/launch/api_client/model/docker_image_batch_job_bundle_v1_response.pyi deleted file mode 100644 index 69d48ce4..00000000 --- a/launch/api_client/model/docker_image_batch_job_bundle_v1_response.pyi +++ /dev/null @@ -1,346 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class DockerImageBatchJobBundleV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "image_repository", - "name", - "created_at", - "id", - "image_tag", - "env", - "command", - } - - class properties: - class command(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - created_at = schemas.DateTimeSchema - - class env(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - id = schemas.StrSchema - image_repository = schemas.StrSchema - image_tag = schemas.StrSchema - name = schemas.StrSchema - cpus = schemas.StrSchema - gpu_type = schemas.StrSchema - gpus = schemas.IntSchema - memory = schemas.StrSchema - mount_location = schemas.StrSchema - public = schemas.BoolSchema - storage = schemas.StrSchema - __annotations__ = { - "command": command, - "created_at": created_at, - "env": env, - "id": id, - "image_repository": image_repository, - "image_tag": image_tag, - "name": name, - "cpus": cpus, - "gpu_type": gpu_type, - "gpus": gpus, - 
"memory": memory, - "mount_location": mount_location, - "public": public, - "storage": storage, - } - image_repository: MetaOapg.properties.image_repository - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - image_tag: MetaOapg.properties.image_tag - env: MetaOapg.properties.env - command: MetaOapg.properties.command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> MetaOapg.properties.gpu_type: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["mount_location"]) -> MetaOapg.properties.mount_location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public"]) -> MetaOapg.properties.public: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "created_at", - "env", - "id", - "image_repository", - "image_tag", - "name", - "cpus", - "gpu_type", - "gpus", - "memory", - "mount_location", - "public", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_repository"] - ) -> MetaOapg.properties.image_repository: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpu_type"] - ) -> typing.Union[MetaOapg.properties.gpu_type, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["mount_location"] - ) -> typing.Union[MetaOapg.properties.mount_location, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public"] - ) -> typing.Union[MetaOapg.properties.public, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "created_at", - "env", - "id", - "image_repository", - "image_tag", - "name", - "cpus", - "gpu_type", - "gpus", - "memory", - "mount_location", - "public", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - image_repository: typing.Union[ - MetaOapg.properties.image_repository, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - image_tag: typing.Union[ - MetaOapg.properties.image_tag, - str, - ], - env: typing.Union[ - MetaOapg.properties.env, - dict, - frozendict.frozendict, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - cpus: typing.Union[MetaOapg.properties.cpus, str, schemas.Unset] = schemas.unset, - gpu_type: typing.Union[MetaOapg.properties.gpu_type, str, schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: 
typing.Union[MetaOapg.properties.memory, str, schemas.Unset] = schemas.unset, - mount_location: typing.Union[MetaOapg.properties.mount_location, str, schemas.Unset] = schemas.unset, - public: typing.Union[MetaOapg.properties.public, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[MetaOapg.properties.storage, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DockerImageBatchJobBundleV1Response": - return super().__new__( - cls, - *_args, - image_repository=image_repository, - name=name, - created_at=created_at, - id=id, - image_tag=image_tag, - env=env, - command=command, - cpus=cpus, - gpu_type=gpu_type, - gpus=gpus, - memory=memory, - mount_location=mount_location, - public=public, - storage=storage, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/endpoint_predict_v1_request.py b/launch/api_client/model/endpoint_predict_v1_request.py index 2fcfd632..64c753c1 100644 --- a/launch/api_client/model/endpoint_predict_v1_request.py +++ b/launch/api_client/model/endpoint_predict_v1_request.py @@ -23,196 +23,197 @@ from launch.api_client import schemas # noqa: F401 -class EndpointPredictV1Request(schemas.DictSchema): +class EndpointPredictV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: + class properties: + + + class url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) args = schemas.AnyTypeSchema - + + + class cloudpickle( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cloudpickle': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def callback_auth() -> typing.Type["CallbackAuth"]: + def callback_auth() -> typing.Type['CallbackAuth']: return CallbackAuth - - callback_url = schemas.StrSchema - cloudpickle = schemas.StrSchema return_pickled = schemas.BoolSchema - url = schemas.StrSchema + + + class destination_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'destination_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { + "url": url, "args": args, - "callback_auth": callback_auth, - "callback_url": callback_url, "cloudpickle": cloudpickle, + "callback_url": callback_url, + "callback_auth": callback_auth, "return_pickled": return_pickled, - "url": url, + "destination_path": 
destination_path, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: - ... - + def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> "CallbackAuth": - ... - + def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: - ... - + def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: - ... - + def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: - ... - + def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> 'CallbackAuth': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: - ... - + def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "return_pickled", - "url", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["destination_path"]) -> MetaOapg.properties.destination_path: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["args"] - ) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["args"]) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_url"] - ) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cloudpickle"]) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cloudpickle"] - ) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["callback_url"]) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_pickled"] - ) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["url"] - ) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["return_pickled"]) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "return_pickled", - "url", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["destination_path"]) -> typing.Union[MetaOapg.properties.destination_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - args: typing.Union[ - MetaOapg.properties.args, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - callback_url: typing.Union[MetaOapg.properties.callback_url, str, schemas.Unset] = schemas.unset, - cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + url: typing.Union[MetaOapg.properties.url, None, str, schemas.Unset] = schemas.unset, + args: typing.Union[MetaOapg.properties.args, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + cloudpickle: 
typing.Union[MetaOapg.properties.cloudpickle, None, str, schemas.Unset] = schemas.unset, + callback_url: typing.Union[MetaOapg.properties.callback_url, None, str, schemas.Unset] = schemas.unset, + callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, return_pickled: typing.Union[MetaOapg.properties.return_pickled, bool, schemas.Unset] = schemas.unset, - url: typing.Union[MetaOapg.properties.url, str, schemas.Unset] = schemas.unset, + destination_path: typing.Union[MetaOapg.properties.destination_path, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "EndpointPredictV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'EndpointPredictV1Request': return super().__new__( cls, *_args, + url=url, args=args, - callback_auth=callback_auth, - callback_url=callback_url, cloudpickle=cloudpickle, + callback_url=callback_url, + callback_auth=callback_auth, return_pickled=return_pickled, - url=url, + destination_path=destination_path, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.callback_auth import CallbackAuth diff --git a/launch/api_client/model/endpoint_predict_v1_request.pyi b/launch/api_client/model/endpoint_predict_v1_request.pyi deleted file mode 100644 index 13dea9cf..00000000 --- a/launch/api_client/model/endpoint_predict_v1_request.pyi +++ /dev/null @@ -1,183 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import 
decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class EndpointPredictV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - args = schemas.AnyTypeSchema - - @staticmethod - def callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - callback_url = schemas.StrSchema - cloudpickle = schemas.StrSchema - return_pickled = schemas.BoolSchema - url = schemas.StrSchema - __annotations__ = { - "args": args, - "callback_auth": callback_auth, - "callback_url": callback_url, - "cloudpickle": cloudpickle, - "return_pickled": return_pickled, - "url": url, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> "CallbackAuth": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "return_pickled", - "url", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["args"] - ) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_url"] - ) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cloudpickle"] - ) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_pickled"] - ) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["url"] - ) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "return_pickled", - "url", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - args: typing.Union[ - MetaOapg.properties.args, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - callback_url: typing.Union[MetaOapg.properties.callback_url, str, schemas.Unset] = schemas.unset, - cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, str, schemas.Unset] = schemas.unset, - return_pickled: typing.Union[MetaOapg.properties.return_pickled, bool, schemas.Unset] = schemas.unset, - url: typing.Union[MetaOapg.properties.url, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "EndpointPredictV1Request": - return super().__new__( - cls, - *_args, - args=args, - callback_auth=callback_auth, - callback_url=callback_url, - cloudpickle=cloudpickle, - return_pickled=return_pickled, - url=url, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.callback_auth import CallbackAuth diff --git a/launch/api_client/model/file.py b/launch/api_client/model/file.py new file mode 100644 index 00000000..5837ed26 --- /dev/null +++ b/launch/api_client/model/file.py @@ -0,0 +1,155 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 
+ + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class File( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + + + class filename( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'filename': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class file_data( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'file_data': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class file_id( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'file_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "filename": filename, + "file_data": file_data, + "file_id": file_id, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["file_data"]) -> MetaOapg.properties.file_data: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["file_id"]) -> MetaOapg.properties.file_id: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["filename", "file_data", "file_id", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["filename"]) -> typing.Union[MetaOapg.properties.filename, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["file_data"]) -> typing.Union[MetaOapg.properties.file_data, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["file_id"]) -> typing.Union[MetaOapg.properties.file_id, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["filename", "file_data", "file_id", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + filename: typing.Union[MetaOapg.properties.filename, None, str, schemas.Unset] = schemas.unset, + file_data: typing.Union[MetaOapg.properties.file_data, None, str, schemas.Unset] = schemas.unset, + file_id: typing.Union[MetaOapg.properties.file_id, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'File': + return super().__new__( + cls, + *_args, + filename=filename, + file_data=file_data, + file_id=file_id, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/filtered_chat_completion_v2_request.py b/launch/api_client/model/filtered_chat_completion_v2_request.py new file mode 100644 index 00000000..d40f2fe0 --- /dev/null +++ b/launch/api_client/model/filtered_chat_completion_v2_request.py @@ -0,0 +1,1805 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class FilteredChatCompletionV2Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "messages", + } + + class properties: + + + class messages( + schemas.ListSchema + ): + + + class MetaOapg: + min_items = 1 + + @staticmethod + def items() -> typing.Type['ChatCompletionRequestMessage']: + return ChatCompletionRequestMessage + + def __new__( + cls, + _arg: typing.Union[typing.Tuple['ChatCompletionRequestMessage'], typing.List['ChatCompletionRequestMessage']], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'messages': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> 'ChatCompletionRequestMessage': + return super().__getitem__(i) + + + class best_of( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'best_of': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = -1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class use_beam_search( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def 
__new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'use_beam_search': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class length_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'length_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class repetition_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'repetition_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class early_stopping( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'early_stopping': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stop_token_ids( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stop_token_ids': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class include_stop_str_in_output( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, 
+ ) -> 'include_stop_str_in_output': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ignore_eos( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ignore_eos': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class spaces_between_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'spaces_between_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class echo( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'echo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class add_generation_prompt( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'add_generation_prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class continue_final_message( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'continue_final_message': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class add_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'add_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class documents( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + + class items( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'items': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'documents': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template_kwargs( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'chat_template_kwargs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_json( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = 
None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'guided_json': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_regex( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_regex': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_choice( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_grammar( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_grammar': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_decoding_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_decoding_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_whitespace_pattern( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'guided_whitespace_pattern': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class priority( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def metadata() -> typing.Type['Metadata']: + return Metadata + + + class temperature( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'temperature': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 1.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class user( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'user': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def service_tier() -> typing.Type['ServiceTier']: + return ServiceTier + + + class model( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + 
+ + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def modalities() -> typing.Type['ResponseModalities']: + return ResponseModalities + + @staticmethod + def reasoning_effort() -> typing.Type['ReasoningEffort']: + return ReasoningEffort + + + class max_completion_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_completion_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'frequency_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def web_search_options() -> typing.Type['WebSearchOptions']: + return WebSearchOptions + + + class top_logprobs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 20 + 
inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class response_format( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + ResponseFormatText, + ResponseFormatJsonSchema, + ResponseFormatJsonObject, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'response_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def audio() -> typing.Type['Audio2']: + return Audio2 + + + class store( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'store': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stream( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stream': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stop() -> typing.Type['StopConfiguration']: + return StopConfiguration + + + class logit_bias( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.IntSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], + ) -> 'logit_bias': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class logprobs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class n( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 128 + inclusive_minimum = 1 + + + def __new__( + cls, + 
*_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'n': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def prediction() -> typing.Type['PredictionContent']: + return PredictionContent + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 9223372036854775616 + inclusive_minimum = -9223372036854775616 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stream_options() -> typing.Type['ChatCompletionStreamOptions']: + return ChatCompletionStreamOptions + + + class tools( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionTool']: + return ChatCompletionTool + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tools': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def tool_choice() -> typing.Type['ChatCompletionToolChoiceOption']: + return ChatCompletionToolChoiceOption + parallel_tool_calls = schemas.BoolSchema + + + class function_call( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + + class any_of_0( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "none": "NONE", + "auto": "AUTO", + } + + @schemas.classproperty + def NONE(cls): + return cls("none") + + @schemas.classproperty + def AUTO(cls): + return cls("auto") + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements 
work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + ChatCompletionFunctionCallOption, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'function_call': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class functions( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionFunctions']: + return ChatCompletionFunctions + max_items = 128 + min_items = 1 + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'functions': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "messages": messages, + "best_of": best_of, + "top_k": top_k, + "min_p": min_p, + "use_beam_search": use_beam_search, + "length_penalty": length_penalty, + "repetition_penalty": repetition_penalty, + "early_stopping": early_stopping, + "stop_token_ids": stop_token_ids, + "include_stop_str_in_output": include_stop_str_in_output, + "ignore_eos": ignore_eos, + "min_tokens": min_tokens, + "skip_special_tokens": skip_special_tokens, + "spaces_between_special_tokens": spaces_between_special_tokens, + "echo": 
echo, + "add_generation_prompt": add_generation_prompt, + "continue_final_message": continue_final_message, + "add_special_tokens": add_special_tokens, + "documents": documents, + "chat_template": chat_template, + "chat_template_kwargs": chat_template_kwargs, + "guided_json": guided_json, + "guided_regex": guided_regex, + "guided_choice": guided_choice, + "guided_grammar": guided_grammar, + "guided_decoding_backend": guided_decoding_backend, + "guided_whitespace_pattern": guided_whitespace_pattern, + "priority": priority, + "metadata": metadata, + "temperature": temperature, + "top_p": top_p, + "user": user, + "service_tier": service_tier, + "model": model, + "modalities": modalities, + "reasoning_effort": reasoning_effort, + "max_completion_tokens": max_completion_tokens, + "frequency_penalty": frequency_penalty, + "presence_penalty": presence_penalty, + "web_search_options": web_search_options, + "top_logprobs": top_logprobs, + "response_format": response_format, + "audio": audio, + "store": store, + "stream": stream, + "stop": stop, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_tokens": max_tokens, + "n": n, + "prediction": prediction, + "seed": seed, + "stream_options": stream_options, + "tools": tools, + "tool_choice": tool_choice, + "parallel_tool_calls": parallel_tool_calls, + "function_call": function_call, + "functions": functions, + } + + messages: MetaOapg.properties.messages + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["add_generation_prompt"]) -> MetaOapg.properties.add_generation_prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["continue_final_message"]) -> MetaOapg.properties.continue_final_message: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["documents"]) -> MetaOapg.properties.documents: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> MetaOapg.properties.chat_template_kwargs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> 'Metadata': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["service_tier"]) -> 'ServiceTier': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["modalities"]) -> 'ResponseModalities': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["reasoning_effort"]) -> 'ReasoningEffort': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_completion_tokens"]) -> MetaOapg.properties.max_completion_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["web_search_options"]) -> 'WebSearchOptions': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["audio"]) -> 'Audio2': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["store"]) -> MetaOapg.properties.store: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prediction"]) -> 'PredictionContent': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tools"]) -> MetaOapg.properties.tools: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_choice"]) -> 'ChatCompletionToolChoiceOption': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> MetaOapg.properties.parallel_tool_calls: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["function_call"]) -> MetaOapg.properties.function_call: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["functions"]) -> MetaOapg.properties.functions: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["messages", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "model", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["messages"]) -> MetaOapg.properties.messages: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["add_generation_prompt"]) -> typing.Union[MetaOapg.properties.add_generation_prompt, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["continue_final_message"]) -> typing.Union[MetaOapg.properties.continue_final_message, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["documents"]) -> typing.Union[MetaOapg.properties.documents, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_kwargs"]) -> typing.Union[MetaOapg.properties.chat_template_kwargs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union['Metadata', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["service_tier"]) -> typing.Union['ServiceTier', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> typing.Union[MetaOapg.properties.model, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["modalities"]) -> typing.Union['ResponseModalities', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["reasoning_effort"]) -> typing.Union['ReasoningEffort', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_completion_tokens"]) -> typing.Union[MetaOapg.properties.max_completion_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["web_search_options"]) -> typing.Union['WebSearchOptions', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> typing.Union[MetaOapg.properties.top_logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["audio"]) -> typing.Union['Audio2', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["store"]) -> typing.Union[MetaOapg.properties.store, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prediction"]) -> typing.Union['PredictionContent', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tools"]) -> typing.Union[MetaOapg.properties.tools, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_choice"]) -> typing.Union['ChatCompletionToolChoiceOption', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["parallel_tool_calls"]) -> typing.Union[MetaOapg.properties.parallel_tool_calls, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["function_call"]) -> typing.Union[MetaOapg.properties.function_call, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["functions"]) -> typing.Union[MetaOapg.properties.functions, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["messages", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "echo", "add_generation_prompt", "continue_final_message", "add_special_tokens", "documents", "chat_template", "chat_template_kwargs", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "priority", "metadata", "temperature", "top_p", "user", "service_tier", "model", "modalities", "reasoning_effort", "max_completion_tokens", "frequency_penalty", "presence_penalty", "web_search_options", "top_logprobs", "response_format", "audio", "store", "stream", "stop", "logit_bias", "logprobs", "max_tokens", "n", "prediction", "seed", "stream_options", "tools", "tool_choice", "parallel_tool_calls", "function_call", "functions", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + messages: typing.Union[MetaOapg.properties.messages, list, tuple, ], + best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, + length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + early_stopping: 
typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, + stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, schemas.Unset] = schemas.unset, + include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, + ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, + min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, + spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, + echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, + add_generation_prompt: typing.Union[MetaOapg.properties.add_generation_prompt, None, bool, schemas.Unset] = schemas.unset, + continue_final_message: typing.Union[MetaOapg.properties.continue_final_message, None, bool, schemas.Unset] = schemas.unset, + add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, + documents: typing.Union[MetaOapg.properties.documents, list, tuple, None, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + chat_template_kwargs: typing.Union[MetaOapg.properties.chat_template_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, + guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = 
schemas.unset, + guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, + guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, + guided_whitespace_pattern: typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, + priority: typing.Union[MetaOapg.properties.priority, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + metadata: typing.Union['Metadata', schemas.Unset] = schemas.unset, + temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, + service_tier: typing.Union['ServiceTier', schemas.Unset] = schemas.unset, + model: typing.Union[MetaOapg.properties.model, None, str, schemas.Unset] = schemas.unset, + modalities: typing.Union['ResponseModalities', schemas.Unset] = schemas.unset, + reasoning_effort: typing.Union['ReasoningEffort', schemas.Unset] = schemas.unset, + max_completion_tokens: typing.Union[MetaOapg.properties.max_completion_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + web_search_options: typing.Union['WebSearchOptions', schemas.Unset] = schemas.unset, + top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + audio: typing.Union['Audio2', schemas.Unset] = schemas.unset, + store: typing.Union[MetaOapg.properties.store, None, bool, schemas.Unset] = schemas.unset, + stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, + stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, + logit_bias: typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + logprobs: typing.Union[MetaOapg.properties.logprobs, None, bool, schemas.Unset] = schemas.unset, + max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + prediction: typing.Union['PredictionContent', schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, + tools: typing.Union[MetaOapg.properties.tools, list, tuple, None, schemas.Unset] = schemas.unset, + tool_choice: typing.Union['ChatCompletionToolChoiceOption', schemas.Unset] = schemas.unset, + parallel_tool_calls: typing.Union[MetaOapg.properties.parallel_tool_calls, bool, schemas.Unset] = schemas.unset, + function_call: typing.Union[MetaOapg.properties.function_call, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + functions: typing.Union[MetaOapg.properties.functions, list, tuple, None, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, 
uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'FilteredChatCompletionV2Request': + return super().__new__( + cls, + *_args, + messages=messages, + best_of=best_of, + top_k=top_k, + min_p=min_p, + use_beam_search=use_beam_search, + length_penalty=length_penalty, + repetition_penalty=repetition_penalty, + early_stopping=early_stopping, + stop_token_ids=stop_token_ids, + include_stop_str_in_output=include_stop_str_in_output, + ignore_eos=ignore_eos, + min_tokens=min_tokens, + skip_special_tokens=skip_special_tokens, + spaces_between_special_tokens=spaces_between_special_tokens, + echo=echo, + add_generation_prompt=add_generation_prompt, + continue_final_message=continue_final_message, + add_special_tokens=add_special_tokens, + documents=documents, + chat_template=chat_template, + chat_template_kwargs=chat_template_kwargs, + guided_json=guided_json, + guided_regex=guided_regex, + guided_choice=guided_choice, + guided_grammar=guided_grammar, + guided_decoding_backend=guided_decoding_backend, + guided_whitespace_pattern=guided_whitespace_pattern, + priority=priority, + metadata=metadata, + temperature=temperature, + top_p=top_p, + user=user, + service_tier=service_tier, + model=model, + modalities=modalities, + reasoning_effort=reasoning_effort, + max_completion_tokens=max_completion_tokens, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + web_search_options=web_search_options, + top_logprobs=top_logprobs, + response_format=response_format, + audio=audio, + store=store, + stream=stream, + stop=stop, + logit_bias=logit_bias, + logprobs=logprobs, + max_tokens=max_tokens, + n=n, + prediction=prediction, + seed=seed, + stream_options=stream_options, + tools=tools, + tool_choice=tool_choice, + parallel_tool_calls=parallel_tool_calls, + function_call=function_call, + functions=functions, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.audio2 import Audio2 +from 
launch.api_client.model.chat_completion_function_call_option import ( + ChatCompletionFunctionCallOption, +) +from launch.api_client.model.chat_completion_functions import ( + ChatCompletionFunctions, +) +from launch.api_client.model.chat_completion_request_message import ( + ChatCompletionRequestMessage, +) +from launch.api_client.model.chat_completion_stream_options import ( + ChatCompletionStreamOptions, +) +from launch.api_client.model.chat_completion_tool import ChatCompletionTool +from launch.api_client.model.chat_completion_tool_choice_option import ( + ChatCompletionToolChoiceOption, +) +from launch.api_client.model.metadata import Metadata +from launch.api_client.model.prediction_content import PredictionContent +from launch.api_client.model.reasoning_effort import ReasoningEffort +from launch.api_client.model.response_format_json_object import ( + ResponseFormatJsonObject, +) +from launch.api_client.model.response_format_json_schema import ( + ResponseFormatJsonSchema, +) +from launch.api_client.model.response_format_text import ResponseFormatText +from launch.api_client.model.response_modalities import ResponseModalities +from launch.api_client.model.service_tier import ServiceTier +from launch.api_client.model.stop_configuration import StopConfiguration +from launch.api_client.model.web_search_options import WebSearchOptions diff --git a/launch/api_client/model/filtered_completion_v2_request.py b/launch/api_client/model/filtered_completion_v2_request.py new file mode 100644 index 00000000..3659e4cc --- /dev/null +++ b/launch/api_client/model/filtered_completion_v2_request.py @@ -0,0 +1,1295 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 
+import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class FilteredCompletionV2Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "prompt", + } + + class properties: + + + class prompt( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + + class any_of_1( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'any_of_1': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + Prompt, + Prompt1, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class best_of( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 20 + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'best_of': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_k( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = -1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_k': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_p': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class use_beam_search( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'use_beam_search': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class length_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'length_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class repetition_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'repetition_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class early_stopping( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'early_stopping': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stop_token_ids( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stop_token_ids': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class include_stop_str_in_output( + schemas.BoolBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'include_stop_str_in_output': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ignore_eos( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ignore_eos': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class skip_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class spaces_between_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'spaces_between_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class add_special_tokens( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 
'add_special_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class response_format( + schemas.ComposedSchema, + ): + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + ResponseFormatText, + ResponseFormatJsonSchema, + ResponseFormatJsonObject, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'response_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_json( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, 
date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'guided_json': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class guided_regex( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_regex': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_choice( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_choice': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_grammar( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_grammar': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_decoding_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_decoding_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class guided_whitespace_pattern( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'guided_whitespace_pattern': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class model( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class echo( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'echo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class frequency_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'frequency_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class logit_bias( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.IntSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, ], + ) -> 'logit_bias': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) 
+ + + class logprobs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 5 + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class n( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 128 + inclusive_minimum = 1 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'n': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class presence_penalty( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = -2.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'presence_penalty': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, 
+ *_args, + _configuration=_configuration, + ) + + @staticmethod + def stop() -> typing.Type['StopConfiguration']: + return StopConfiguration + + + class stream( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stream': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def stream_options() -> typing.Type['ChatCompletionStreamOptions']: + return ChatCompletionStreamOptions + + + class suffix( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'suffix': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class temperature( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 2.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'temperature': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_p( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_maximum = 1.0 + inclusive_minimum = 0.0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_p': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class user( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'user': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "prompt": prompt, + "best_of": best_of, + "top_k": top_k, + "min_p": min_p, + "use_beam_search": use_beam_search, + "length_penalty": length_penalty, + "repetition_penalty": repetition_penalty, + "early_stopping": early_stopping, + "stop_token_ids": stop_token_ids, + "include_stop_str_in_output": include_stop_str_in_output, + "ignore_eos": ignore_eos, + "min_tokens": min_tokens, + "skip_special_tokens": skip_special_tokens, + "spaces_between_special_tokens": spaces_between_special_tokens, + "add_special_tokens": add_special_tokens, + "response_format": response_format, + "guided_json": guided_json, + "guided_regex": guided_regex, + "guided_choice": guided_choice, + "guided_grammar": guided_grammar, + "guided_decoding_backend": guided_decoding_backend, + "guided_whitespace_pattern": guided_whitespace_pattern, + "model": model, + "echo": echo, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_tokens": max_tokens, + "n": n, + "presence_penalty": presence_penalty, + "seed": seed, + "stop": stop, + "stream": stream, + "stream_options": stream_options, + "suffix": suffix, + "temperature": temperature, + "top_p": top_p, + "user": user, + } + + prompt: MetaOapg.properties.prompt + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["best_of"]) -> MetaOapg.properties.best_of: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_k"]) -> MetaOapg.properties.top_k: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_p"]) -> MetaOapg.properties.min_p: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["use_beam_search"]) -> MetaOapg.properties.use_beam_search: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["length_penalty"]) -> MetaOapg.properties.length_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["repetition_penalty"]) -> MetaOapg.properties.repetition_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["early_stopping"]) -> MetaOapg.properties.early_stopping: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop_token_ids"]) -> MetaOapg.properties.stop_token_ids: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> MetaOapg.properties.include_stop_str_in_output: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ignore_eos"]) -> MetaOapg.properties.ignore_eos: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_tokens"]) -> MetaOapg.properties.min_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_special_tokens"]) -> MetaOapg.properties.skip_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> MetaOapg.properties.spaces_between_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["add_special_tokens"]) -> MetaOapg.properties.add_special_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["response_format"]) -> MetaOapg.properties.response_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_json"]) -> MetaOapg.properties.guided_json: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_regex"]) -> MetaOapg.properties.guided_regex: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_choice"]) -> MetaOapg.properties.guided_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_grammar"]) -> MetaOapg.properties.guided_grammar: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> MetaOapg.properties.guided_decoding_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> MetaOapg.properties.guided_whitespace_pattern: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model"]) -> MetaOapg.properties.model: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["echo"]) -> MetaOapg.properties.echo: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["frequency_penalty"]) -> MetaOapg.properties.frequency_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logit_bias"]) -> MetaOapg.properties.logit_bias: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprobs"]) -> MetaOapg.properties.logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_tokens"]) -> MetaOapg.properties.max_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["n"]) -> MetaOapg.properties.n: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["presence_penalty"]) -> MetaOapg.properties.presence_penalty: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stop"]) -> 'StopConfiguration': ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream"]) -> MetaOapg.properties.stream: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream_options"]) -> 'ChatCompletionStreamOptions': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["suffix"]) -> MetaOapg.properties.suffix: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["temperature"]) -> MetaOapg.properties.temperature: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_p"]) -> MetaOapg.properties.top_p: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["user"]) -> MetaOapg.properties.user: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "model", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prompt"]) -> MetaOapg.properties.prompt: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["best_of"]) -> typing.Union[MetaOapg.properties.best_of, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_k"]) -> typing.Union[MetaOapg.properties.top_k, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_p"]) -> typing.Union[MetaOapg.properties.min_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["use_beam_search"]) -> typing.Union[MetaOapg.properties.use_beam_search, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["length_penalty"]) -> typing.Union[MetaOapg.properties.length_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["repetition_penalty"]) -> typing.Union[MetaOapg.properties.repetition_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["early_stopping"]) -> typing.Union[MetaOapg.properties.early_stopping, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop_token_ids"]) -> typing.Union[MetaOapg.properties.stop_token_ids, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["include_stop_str_in_output"]) -> typing.Union[MetaOapg.properties.include_stop_str_in_output, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["ignore_eos"]) -> typing.Union[MetaOapg.properties.ignore_eos, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_tokens"]) -> typing.Union[MetaOapg.properties.min_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_special_tokens"]) -> typing.Union[MetaOapg.properties.skip_special_tokens, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["spaces_between_special_tokens"]) -> typing.Union[MetaOapg.properties.spaces_between_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["add_special_tokens"]) -> typing.Union[MetaOapg.properties.add_special_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["response_format"]) -> typing.Union[MetaOapg.properties.response_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_json"]) -> typing.Union[MetaOapg.properties.guided_json, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_regex"]) -> typing.Union[MetaOapg.properties.guided_regex, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_choice"]) -> typing.Union[MetaOapg.properties.guided_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_grammar"]) -> typing.Union[MetaOapg.properties.guided_grammar, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_decoding_backend"]) -> typing.Union[MetaOapg.properties.guided_decoding_backend, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["guided_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.guided_whitespace_pattern, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model"]) -> typing.Union[MetaOapg.properties.model, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["echo"]) -> typing.Union[MetaOapg.properties.echo, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["frequency_penalty"]) -> typing.Union[MetaOapg.properties.frequency_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logit_bias"]) -> typing.Union[MetaOapg.properties.logit_bias, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprobs"]) -> typing.Union[MetaOapg.properties.logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_tokens"]) -> typing.Union[MetaOapg.properties.max_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["n"]) -> typing.Union[MetaOapg.properties.n, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["presence_penalty"]) -> typing.Union[MetaOapg.properties.presence_penalty, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stop"]) -> typing.Union['StopConfiguration', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream"]) -> typing.Union[MetaOapg.properties.stream, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["stream_options"]) -> typing.Union['ChatCompletionStreamOptions', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["suffix"]) -> typing.Union[MetaOapg.properties.suffix, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["temperature"]) -> typing.Union[MetaOapg.properties.temperature, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_p"]) -> typing.Union[MetaOapg.properties.top_p, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["user"]) -> typing.Union[MetaOapg.properties.user, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["prompt", "best_of", "top_k", "min_p", "use_beam_search", "length_penalty", "repetition_penalty", "early_stopping", "stop_token_ids", "include_stop_str_in_output", "ignore_eos", "min_tokens", "skip_special_tokens", "spaces_between_special_tokens", "add_special_tokens", "response_format", "guided_json", "guided_regex", "guided_choice", "guided_grammar", "guided_decoding_backend", "guided_whitespace_pattern", "model", "echo", "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "n", "presence_penalty", "seed", "stop", "stream", "stream_options", "suffix", "temperature", "top_p", "user", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + prompt: typing.Union[MetaOapg.properties.prompt, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + best_of: typing.Union[MetaOapg.properties.best_of, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + top_k: typing.Union[MetaOapg.properties.top_k, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + min_p: typing.Union[MetaOapg.properties.min_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + use_beam_search: typing.Union[MetaOapg.properties.use_beam_search, None, bool, schemas.Unset] = schemas.unset, + length_penalty: typing.Union[MetaOapg.properties.length_penalty, None, decimal.Decimal, int, float, schemas.Unset] 
= schemas.unset, + repetition_penalty: typing.Union[MetaOapg.properties.repetition_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + early_stopping: typing.Union[MetaOapg.properties.early_stopping, None, bool, schemas.Unset] = schemas.unset, + stop_token_ids: typing.Union[MetaOapg.properties.stop_token_ids, list, tuple, None, schemas.Unset] = schemas.unset, + include_stop_str_in_output: typing.Union[MetaOapg.properties.include_stop_str_in_output, None, bool, schemas.Unset] = schemas.unset, + ignore_eos: typing.Union[MetaOapg.properties.ignore_eos, None, bool, schemas.Unset] = schemas.unset, + min_tokens: typing.Union[MetaOapg.properties.min_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + skip_special_tokens: typing.Union[MetaOapg.properties.skip_special_tokens, None, bool, schemas.Unset] = schemas.unset, + spaces_between_special_tokens: typing.Union[MetaOapg.properties.spaces_between_special_tokens, None, bool, schemas.Unset] = schemas.unset, + add_special_tokens: typing.Union[MetaOapg.properties.add_special_tokens, None, bool, schemas.Unset] = schemas.unset, + response_format: typing.Union[MetaOapg.properties.response_format, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + guided_json: typing.Union[MetaOapg.properties.guided_json, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + guided_regex: typing.Union[MetaOapg.properties.guided_regex, None, str, schemas.Unset] = schemas.unset, + guided_choice: typing.Union[MetaOapg.properties.guided_choice, list, tuple, None, schemas.Unset] = schemas.unset, + guided_grammar: typing.Union[MetaOapg.properties.guided_grammar, None, str, schemas.Unset] = schemas.unset, + guided_decoding_backend: typing.Union[MetaOapg.properties.guided_decoding_backend, None, str, schemas.Unset] = schemas.unset, + guided_whitespace_pattern: 
typing.Union[MetaOapg.properties.guided_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, + model: typing.Union[MetaOapg.properties.model, None, str, schemas.Unset] = schemas.unset, + echo: typing.Union[MetaOapg.properties.echo, None, bool, schemas.Unset] = schemas.unset, + frequency_penalty: typing.Union[MetaOapg.properties.frequency_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + logit_bias: typing.Union[MetaOapg.properties.logit_bias, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + logprobs: typing.Union[MetaOapg.properties.logprobs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_tokens: typing.Union[MetaOapg.properties.max_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + n: typing.Union[MetaOapg.properties.n, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + presence_penalty: typing.Union[MetaOapg.properties.presence_penalty, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + stop: typing.Union['StopConfiguration', schemas.Unset] = schemas.unset, + stream: typing.Union[MetaOapg.properties.stream, None, bool, schemas.Unset] = schemas.unset, + stream_options: typing.Union['ChatCompletionStreamOptions', schemas.Unset] = schemas.unset, + suffix: typing.Union[MetaOapg.properties.suffix, None, str, schemas.Unset] = schemas.unset, + temperature: typing.Union[MetaOapg.properties.temperature, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + top_p: typing.Union[MetaOapg.properties.top_p, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + user: typing.Union[MetaOapg.properties.user, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, 
int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'FilteredCompletionV2Request': + return super().__new__( + cls, + *_args, + prompt=prompt, + best_of=best_of, + top_k=top_k, + min_p=min_p, + use_beam_search=use_beam_search, + length_penalty=length_penalty, + repetition_penalty=repetition_penalty, + early_stopping=early_stopping, + stop_token_ids=stop_token_ids, + include_stop_str_in_output=include_stop_str_in_output, + ignore_eos=ignore_eos, + min_tokens=min_tokens, + skip_special_tokens=skip_special_tokens, + spaces_between_special_tokens=spaces_between_special_tokens, + add_special_tokens=add_special_tokens, + response_format=response_format, + guided_json=guided_json, + guided_regex=guided_regex, + guided_choice=guided_choice, + guided_grammar=guided_grammar, + guided_decoding_backend=guided_decoding_backend, + guided_whitespace_pattern=guided_whitespace_pattern, + model=model, + echo=echo, + frequency_penalty=frequency_penalty, + logit_bias=logit_bias, + logprobs=logprobs, + max_tokens=max_tokens, + n=n, + presence_penalty=presence_penalty, + seed=seed, + stop=stop, + stream=stream, + stream_options=stream_options, + suffix=suffix, + temperature=temperature, + top_p=top_p, + user=user, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_stream_options import ( + ChatCompletionStreamOptions, +) +from launch.api_client.model.prompt import Prompt +from launch.api_client.model.prompt1 import Prompt1 +from launch.api_client.model.response_format_json_object import ( + ResponseFormatJsonObject, +) +from launch.api_client.model.response_format_json_schema import ( + ResponseFormatJsonSchema, +) +from launch.api_client.model.response_format_text import ResponseFormatText +from launch.api_client.model.stop_configuration import StopConfiguration diff --git a/launch/api_client/model/function1.py b/launch/api_client/model/function1.py new file mode 100644 index 00000000..40d825cd --- /dev/null +++ 
b/launch/api_client/model/function1.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Function1( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "name", + "arguments", + } + + class properties: + name = schemas.StrSchema + arguments = schemas.StrSchema + __annotations__ = { + "name": name, + "arguments": arguments, + } + + name: MetaOapg.properties.name + arguments: MetaOapg.properties.arguments + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... 
+ + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + arguments: typing.Union[MetaOapg.properties.arguments, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Function1': + return super().__new__( + cls, + *_args, + name=name, + arguments=arguments, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/function2.py b/launch/api_client/model/function2.py new file mode 100644 index 00000000..5047325c --- /dev/null +++ b/launch/api_client/model/function2.py @@ -0,0 +1,126 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Function2( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + + + class name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class arguments( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'arguments': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "name": name, + "arguments": arguments, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> typing.Union[MetaOapg.properties.arguments, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "arguments", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, + arguments: typing.Union[MetaOapg.properties.arguments, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Function2': + return super().__new__( + cls, + *_args, + name=name, + arguments=arguments, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/function3.py b/launch/api_client/model/function3.py new file mode 100644 index 00000000..183409ff --- /dev/null +++ b/launch/api_client/model/function3.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Function3( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "name", + } + + class properties: + name = schemas.StrSchema + __annotations__ = { + "name": name, + } + + name: MetaOapg.properties.name + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Function3': + return super().__new__( + cls, + *_args, + name=name, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/function_call.py b/launch/api_client/model/function_call.py new file mode 100644 index 00000000..8fe1ae52 --- /dev/null +++ b/launch/api_client/model/function_call.py @@ -0,0 +1,95 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: 
F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class FunctionCall( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "name", + "arguments", + } + + class properties: + arguments = schemas.StrSchema + name = schemas.StrSchema + __annotations__ = { + "arguments": arguments, + "name": name, + } + + name: MetaOapg.properties.name + arguments: MetaOapg.properties.arguments + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + arguments: typing.Union[MetaOapg.properties.arguments, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'FunctionCall': + return super().__new__( + cls, + *_args, + name=name, + arguments=arguments, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/function_call2.py b/launch/api_client/model/function_call2.py new file mode 100644 index 00000000..4a13f166 --- /dev/null +++ b/launch/api_client/model/function_call2.py @@ -0,0 +1,126 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class FunctionCall2( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + + + class arguments( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'arguments': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "arguments": arguments, + "name": name, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["arguments"]) -> MetaOapg.properties.arguments: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["arguments"]) -> typing.Union[MetaOapg.properties.arguments, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> typing.Union[MetaOapg.properties.name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["arguments", "name", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + arguments: typing.Union[MetaOapg.properties.arguments, None, str, schemas.Unset] = schemas.unset, + name: typing.Union[MetaOapg.properties.name, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'FunctionCall2': + return super().__new__( + cls, + *_args, + arguments=arguments, + name=name, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/function_object.py b/launch/api_client/model/function_object.py new file mode 100644 index 00000000..631de02b --- /dev/null +++ b/launch/api_client/model/function_object.py @@ -0,0 +1,156 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class FunctionObject( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "name", + } + + class properties: + name = schemas.StrSchema + + + class description( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'description': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def parameters() -> typing.Type['FunctionParameters']: + return FunctionParameters + + + class strict( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'strict': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "name": name, + "description": description, + "parameters": parameters, + "strict": strict, + } + + name: MetaOapg.properties.name + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["parameters"]) -> 'FunctionParameters': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["strict"]) -> MetaOapg.properties.strict: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", "strict", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["parameters"]) -> typing.Union['FunctionParameters', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["strict"]) -> typing.Union[MetaOapg.properties.strict, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "description", "parameters", "strict", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + description: typing.Union[MetaOapg.properties.description, None, str, schemas.Unset] = schemas.unset, + parameters: typing.Union['FunctionParameters', schemas.Unset] = schemas.unset, + strict: typing.Union[MetaOapg.properties.strict, None, bool, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'FunctionObject': + return super().__new__( + cls, + *_args, + name=name, + description=description, + parameters=parameters, + strict=strict, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.function_parameters import FunctionParameters diff --git a/launch/api_client/model/function_parameters.py b/launch/api_client/model/function_parameters.py new file mode 100644 index 00000000..17562468 --- /dev/null +++ b/launch/api_client/model/function_parameters.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class FunctionParameters( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'FunctionParameters': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/get_async_task_v1_response.py b/launch/api_client/model/get_async_task_v1_response.py index 80939dab..0bfed678 100644 --- a/launch/api_client/model/get_async_task_v1_response.py +++ b/launch/api_client/model/get_async_task_v1_response.py @@ -23,160 +23,136 @@ from launch.api_client import schemas # noqa: F401 -class GetAsyncTaskV1Response(schemas.DictSchema): +class GetAsyncTaskV1Response( + 
schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "task_id", "status", } - + class properties: + task_id = schemas.StrSchema + @staticmethod - def status() -> typing.Type["TaskStatus"]: + def status() -> typing.Type['TaskStatus']: return TaskStatus - - task_id = schemas.StrSchema result = schemas.AnyTypeSchema - traceback = schemas.StrSchema + + + class traceback( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'traceback': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class status_code( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'status_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "status": status, "task_id": task_id, + "status": status, "result": result, "traceback": traceback, + "status_code": status_code, } - + task_id: MetaOapg.properties.task_id - status: "TaskStatus" - + status: 'TaskStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: - ... - + def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "task_id", - "result", - "traceback", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["task_id", "status", "result", "traceback", "status_code", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["result"] - ) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["result"]) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["traceback"] - ) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["traceback"]) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "task_id", - "result", - "traceback", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> typing.Union[MetaOapg.properties.status_code, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["task_id", "status", "result", "traceback", "status_code", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - task_id: typing.Union[ - MetaOapg.properties.task_id, - str, - ], - status: "TaskStatus", - result: typing.Union[ - MetaOapg.properties.result, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - traceback: typing.Union[MetaOapg.properties.traceback, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + task_id: typing.Union[MetaOapg.properties.task_id, str, ], + status: 'TaskStatus', + result: typing.Union[MetaOapg.properties.result, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + traceback: typing.Union[MetaOapg.properties.traceback, None, str, schemas.Unset] = schemas.unset, + status_code: typing.Union[MetaOapg.properties.status_code, None, decimal.Decimal, int, schemas.Unset] = 
schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetAsyncTaskV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetAsyncTaskV1Response': return super().__new__( cls, *_args, @@ -184,9 +160,9 @@ def __new__( status=status, result=result, traceback=traceback, + status_code=status_code, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.task_status import TaskStatus diff --git a/launch/api_client/model/get_async_task_v1_response.pyi b/launch/api_client/model/get_async_task_v1_response.pyi deleted file mode 100644 index bf4524d5..00000000 --- a/launch/api_client/model/get_async_task_v1_response.pyi +++ /dev/null @@ -1,165 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetAsyncTaskV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "task_id", - "status", - } - - class properties: - @staticmethod - def status() -> typing.Type["TaskStatus"]: - return TaskStatus - task_id = schemas.StrSchema - result = schemas.AnyTypeSchema - traceback = schemas.StrSchema - __annotations__ = { - "status": status, - "task_id": task_id, - "result": result, - "traceback": traceback, - } - task_id: MetaOapg.properties.task_id - status: "TaskStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "task_id", - "result", - "traceback", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["task_id"]) -> MetaOapg.properties.task_id: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["result"] - ) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["traceback"] - ) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "task_id", - "result", - "traceback", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - task_id: typing.Union[ - MetaOapg.properties.task_id, - str, - ], - status: "TaskStatus", - result: typing.Union[ - MetaOapg.properties.result, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - traceback: typing.Union[MetaOapg.properties.traceback, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetAsyncTaskV1Response": - return super().__new__( - cls, - *_args, - task_id=task_id, - status=status, - result=result, - traceback=traceback, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.task_status import TaskStatus diff --git a/launch/api_client/model/create_batch_completions_response.pyi b/launch/api_client/model/get_batch_completion_v2_response.py similarity index 53% rename from launch/api_client/model/create_batch_completions_response.pyi rename to launch/api_client/model/get_batch_completion_v2_response.py index 404b999b..3a136fe3 100644 --- a/launch/api_client/model/create_batch_completions_response.pyi +++ b/launch/api_client/model/get_batch_completion_v2_response.py @@ -19,84 +19,70 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class CreateBatchCompletionsResponse(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class 
GetBatchCompletionV2Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { - "job_id", + "job", } - + class properties: - job_id = schemas.StrSchema + + @staticmethod + def job() -> typing.Type['BatchCompletionsJob']: + return BatchCompletionsJob __annotations__ = { - "job_id": job_id, + "job": job, } - job_id: MetaOapg.properties.job_id - + + job: 'BatchCompletionsJob' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + def __getitem__(self, name: typing_extensions.Literal["job"]) -> 'BatchCompletionsJob': ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + def get_item_oapg(self, name: typing_extensions.Literal["job"]) -> 'BatchCompletionsJob': ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["job_id",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - job_id: typing.Union[ - MetaOapg.properties.job_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + job: 'BatchCompletionsJob', _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "CreateBatchCompletionsResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetBatchCompletionV2Response': return super().__new__( cls, *_args, - job_id=job_id, + job=job, _configuration=_configuration, **kwargs, ) + +from launch.api_client.model.batch_completions_job import BatchCompletionsJob diff --git a/launch/api_client/model/get_batch_job_v1_response.py b/launch/api_client/model/get_batch_job_v1_response.py index e5327ade..667120b9 100644 --- a/launch/api_client/model/get_batch_job_v1_response.py +++ b/launch/api_client/model/get_batch_job_v1_response.py @@ -23,178 +23,165 @@ from launch.api_client import schemas # noqa: F401 -class GetBatchJobV1Response(schemas.DictSchema): +class GetBatchJobV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "duration", "status", } - + class properties: - duration = schemas.NumberSchema - + @staticmethod - def status() -> typing.Type["BatchJobStatus"]: + def status() -> typing.Type['BatchJobStatus']: return BatchJobStatus - - num_tasks_completed = schemas.IntSchema - num_tasks_pending = schemas.IntSchema - result = schemas.StrSchema + duration = schemas.StrSchema + + + class result( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'result': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_tasks_pending( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_tasks_pending': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_tasks_completed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_tasks_completed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "duration": duration, "status": status, - "num_tasks_completed": num_tasks_completed, - "num_tasks_pending": num_tasks_pending, + "duration": duration, "result": result, + "num_tasks_pending": num_tasks_pending, + "num_tasks_completed": num_tasks_completed, } - + duration: MetaOapg.properties.duration - status: "BatchJobStatus" - + status: 'BatchJobStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_tasks_completed"] - ) -> MetaOapg.properties.num_tasks_completed: - ... - + def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_tasks_pending"] - ) -> MetaOapg.properties.num_tasks_pending: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_tasks_pending"]) -> MetaOapg.properties.num_tasks_pending: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_tasks_completed"]) -> MetaOapg.properties.num_tasks_completed: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "duration", - "status", - "num_tasks_completed", - "num_tasks_pending", - "result", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["status", "duration", "result", "num_tasks_pending", "num_tasks_completed", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_tasks_completed"] - ) -> typing.Union[MetaOapg.properties.num_tasks_completed, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["result"]) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_tasks_pending"] - ) -> typing.Union[MetaOapg.properties.num_tasks_pending, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_tasks_pending"]) -> typing.Union[MetaOapg.properties.num_tasks_pending, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["result"] - ) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_tasks_completed"]) -> typing.Union[MetaOapg.properties.num_tasks_completed, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "duration", - "status", - "num_tasks_completed", - "num_tasks_pending", - "result", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status", "duration", "result", "num_tasks_pending", "num_tasks_completed", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - duration: typing.Union[ - MetaOapg.properties.duration, - decimal.Decimal, - int, - float, - ], - status: "BatchJobStatus", - num_tasks_completed: typing.Union[ - MetaOapg.properties.num_tasks_completed, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - num_tasks_pending: typing.Union[ - MetaOapg.properties.num_tasks_pending, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - result: typing.Union[MetaOapg.properties.result, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + duration: typing.Union[MetaOapg.properties.duration, str, ], + status: 'BatchJobStatus', + result: typing.Union[MetaOapg.properties.result, None, str, schemas.Unset] = schemas.unset, + num_tasks_pending: typing.Union[MetaOapg.properties.num_tasks_pending, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + num_tasks_completed: typing.Union[MetaOapg.properties.num_tasks_completed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetBatchJobV1Response': return super().__new__( cls, *_args, duration=duration, status=status, - num_tasks_completed=num_tasks_completed, - num_tasks_pending=num_tasks_pending, result=result, + num_tasks_pending=num_tasks_pending, + 
num_tasks_completed=num_tasks_completed, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_batch_job_v1_response.pyi b/launch/api_client/model/get_batch_job_v1_response.pyi deleted file mode 100644 index 264bf1cb..00000000 --- a/launch/api_client/model/get_batch_job_v1_response.pyi +++ /dev/null @@ -1,169 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetBatchJobV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "duration", - "status", - } - - class properties: - duration = schemas.NumberSchema - - @staticmethod - def status() -> typing.Type["BatchJobStatus"]: - return BatchJobStatus - num_tasks_completed = schemas.IntSchema - num_tasks_pending = schemas.IntSchema - result = schemas.StrSchema - __annotations__ = { - "duration": duration, - "status": status, - "num_tasks_completed": num_tasks_completed, - "num_tasks_pending": num_tasks_pending, - "result": result, - } - duration: MetaOapg.properties.duration - status: "BatchJobStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_tasks_completed"] - ) -> MetaOapg.properties.num_tasks_completed: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_tasks_pending"] - ) -> MetaOapg.properties.num_tasks_pending: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "duration", - "status", - "num_tasks_completed", - "num_tasks_pending", - "result", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["duration"]) -> MetaOapg.properties.duration: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_tasks_completed"] - ) -> typing.Union[MetaOapg.properties.num_tasks_completed, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_tasks_pending"] - ) -> typing.Union[MetaOapg.properties.num_tasks_pending, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["result"] - ) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "duration", - "status", - "num_tasks_completed", - "num_tasks_pending", - "result", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - duration: typing.Union[ - MetaOapg.properties.duration, - decimal.Decimal, - int, - float, - ], - status: "BatchJobStatus", - num_tasks_completed: typing.Union[ - MetaOapg.properties.num_tasks_completed, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - num_tasks_pending: typing.Union[ - MetaOapg.properties.num_tasks_pending, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - result: typing.Union[MetaOapg.properties.result, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetBatchJobV1Response": - return super().__new__( - cls, - *_args, - duration=duration, - status=status, - num_tasks_completed=num_tasks_completed, - num_tasks_pending=num_tasks_pending, - result=result, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_docker_image_batch_job_v1_response.py b/launch/api_client/model/get_docker_image_batch_job_v1_response.py index 6ad5eb30..0015a5a8 100644 --- a/launch/api_client/model/get_docker_image_batch_job_v1_response.py +++ b/launch/api_client/model/get_docker_image_batch_job_v1_response.py @@ -23,89 +23,60 @@ from launch.api_client import schemas # noqa: F401 -class GetDockerImageBatchJobV1Response(schemas.DictSchema): +class GetDockerImageBatchJobV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "status", } - + class properties: + @staticmethod - def status() -> typing.Type["BatchJobStatus"]: + def status() -> typing.Type['BatchJobStatus']: return BatchJobStatus - __annotations__ = { "status": status, } - - status: "BatchJobStatus" - + + status: 'BatchJobStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["status",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["status", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["status",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - status: "BatchJobStatus", + *_args: typing.Union[dict, frozendict.frozendict, ], + status: 'BatchJobStatus', _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetDockerImageBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetDockerImageBatchJobV1Response': return super().__new__( cls, *_args, @@ -114,5 +85,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_file_content_response.py b/launch/api_client/model/get_file_content_response.py index 04a92c4a..6e770f90 100644 --- a/launch/api_client/model/get_file_content_response.py +++ b/launch/api_client/model/get_file_content_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class GetFileContentResponse(schemas.DictSchema): +class GetFileContentResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,104 +34,59 @@ class GetFileContentResponse(schemas.DictSchema): Response object for retrieving a file's content. 
""" + class MetaOapg: required = { "id", "content", } - + class properties: - content = schemas.StrSchema id = schemas.StrSchema + content = schemas.StrSchema __annotations__ = { - "content": content, "id": id, + "content": content, } - + id: MetaOapg.properties.id content: MetaOapg.properties.content - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "id", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "content", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "id", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "content", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - content: typing.Union[ - MetaOapg.properties.content, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + id: typing.Union[MetaOapg.properties.id, str, ], + content: typing.Union[MetaOapg.properties.content, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFileContentResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetFileContentResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/get_file_content_response.pyi b/launch/api_client/model/get_file_content_response.pyi deleted file mode 100644 index 5437956d..00000000 --- a/launch/api_client/model/get_file_content_response.pyi +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetFileContentResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI 
Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for retrieving a file's content. - """ - - class MetaOapg: - required = { - "id", - "content", - } - - class properties: - content = schemas.StrSchema - id = schemas.StrSchema - __annotations__ = { - "content": content, - "id": id, - } - id: MetaOapg.properties.id - content: MetaOapg.properties.content - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "id", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "id", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - content: typing.Union[ - MetaOapg.properties.content, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFileContentResponse": - return super().__new__( - cls, - *_args, - id=id, - content=content, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/get_file_response.py b/launch/api_client/model/get_file_response.py index 9fa36069..476e738e 100644 --- a/launch/api_client/model/get_file_response.py +++ b/launch/api_client/model/get_file_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class GetFileResponse(schemas.DictSchema): +class GetFileResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,123 +34,70 @@ class GetFileResponse(schemas.DictSchema): Response object for retrieving a file. """ + class MetaOapg: required = { "filename", "size", "id", } - + class properties: - filename = schemas.StrSchema id = schemas.StrSchema + filename = schemas.StrSchema size = schemas.IntSchema __annotations__ = { - "filename": filename, "id": id, + "filename": filename, "size": size, } - + filename: MetaOapg.properties.filename size: MetaOapg.properties.size id: MetaOapg.properties.id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: - ... - + def __getitem__(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "filename", - "id", - "size", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "filename", "size", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "filename", - "id", - "size", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "filename", "size", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - filename: typing.Union[ - MetaOapg.properties.filename, - str, - ], - size: typing.Union[ - MetaOapg.properties.size, - decimal.Decimal, - int, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + filename: typing.Union[MetaOapg.properties.filename, str, ], + size: typing.Union[MetaOapg.properties.size, decimal.Decimal, int, ], + id: typing.Union[MetaOapg.properties.id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFileResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetFileResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/get_file_response.pyi b/launch/api_client/model/get_file_response.pyi deleted file mode 100644 index 5f5e67e4..00000000 --- a/launch/api_client/model/get_file_response.pyi +++ /dev/null @@ -1,139 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # 
noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetFileResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for retrieving a file. - """ - - class MetaOapg: - required = { - "filename", - "size", - "id", - } - - class properties: - filename = schemas.StrSchema - id = schemas.StrSchema - size = schemas.IntSchema - __annotations__ = { - "filename": filename, - "id": id, - "size": size, - } - filename: MetaOapg.properties.filename - size: MetaOapg.properties.size - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "filename", - "id", - "size", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["filename"]) -> MetaOapg.properties.filename: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["size"]) -> MetaOapg.properties.size: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "filename", - "id", - "size", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - filename: typing.Union[ - MetaOapg.properties.filename, - str, - ], - size: typing.Union[ - MetaOapg.properties.size, - decimal.Decimal, - int, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFileResponse": - return super().__new__( - cls, - *_args, - filename=filename, - size=size, - id=id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/get_fine_tune_events_response.py b/launch/api_client/model/get_fine_tune_events_response.py index 40be8716..d2b50d5b 100644 --- a/launch/api_client/model/get_fine_tune_events_response.py +++ b/launch/api_client/model/get_fine_tune_events_response.py @@ -23,109 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class GetFineTuneEventsResponse(schemas.DictSchema): +class GetFineTuneEventsResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "events", } - + class properties: - class events(schemas.ListSchema): + + + class events( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["LLMFineTuneEvent"]: + def items() -> typing.Type['LLMFineTuneEvent']: return LLMFineTuneEvent - + def __new__( cls, - _arg: typing.Union[typing.Tuple["LLMFineTuneEvent"], typing.List["LLMFineTuneEvent"]], + _arg: typing.Union[typing.Tuple['LLMFineTuneEvent'], typing.List['LLMFineTuneEvent']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "events": + ) -> 'events': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "LLMFineTuneEvent": + + def __getitem__(self, i: int) -> 'LLMFineTuneEvent': return super().__getitem__(i) - __annotations__ = { "events": events, } - + events: MetaOapg.properties.events - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: - ... - + def __getitem__(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["events",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["events", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["events",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["events", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - events: typing.Union[ - MetaOapg.properties.events, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + events: typing.Union[MetaOapg.properties.events, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFineTuneEventsResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetFineTuneEventsResponse': return super().__new__( cls, *_args, @@ -134,5 +107,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.llm_fine_tune_event import LLMFineTuneEvent diff --git a/launch/api_client/model/get_fine_tune_events_response.pyi b/launch/api_client/model/get_fine_tune_events_response.pyi deleted file mode 100644 index ae72d3f8..00000000 --- a/launch/api_client/model/get_fine_tune_events_response.pyi +++ /dev/null @@ -1,121 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, 
datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetFineTuneEventsResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "events", - } - - class properties: - class events(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["LLMFineTuneEvent"]: - return LLMFineTuneEvent - def __new__( - cls, - _arg: typing.Union[typing.Tuple["LLMFineTuneEvent"], typing.List["LLMFineTuneEvent"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "events": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "LLMFineTuneEvent": - return super().__getitem__(i) - __annotations__ = { - "events": events, - } - events: MetaOapg.properties.events - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["events",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["events"]) -> MetaOapg.properties.events: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["events",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - events: typing.Union[ - MetaOapg.properties.events, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFineTuneEventsResponse": - return super().__new__( - cls, - *_args, - events=events, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.llm_fine_tune_event import LLMFineTuneEvent diff --git a/launch/api_client/model/get_fine_tune_job_response.pyi b/launch/api_client/model/get_fine_tune_job_response.pyi deleted file mode 100644 index ffffe7c4..00000000 --- a/launch/api_client/model/get_fine_tune_job_response.pyi +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "fine_tune_id", - "status", - } - - class properties: - fine_tune_id = schemas.StrSchema - - @staticmethod - def status() -> typing.Type["BatchJobStatus"]: - return BatchJobStatus - __annotations__ = { - "fine_tune_id": fine_tune_id, - "status": status, - } - fine_tune_id: MetaOapg.properties.fine_tune_id - status: "BatchJobStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "fine_tune_id", - "status", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["fine_tune_id"]) -> MetaOapg.properties.fine_tune_id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "fine_tune_id", - "status", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - fine_tune_id: typing.Union[ - MetaOapg.properties.fine_tune_id, - str, - ], - status: "BatchJobStatus", - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFineTuneResponse": - return super().__new__( - cls, - *_args, - fine_tune_id=fine_tune_id, - status=status, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_fine_tune_response.py b/launch/api_client/model/get_fine_tune_response.py index bb3f17c3..d8e32316 100644 --- a/launch/api_client/model/get_fine_tune_response.py +++ b/launch/api_client/model/get_fine_tune_response.py @@ -23,204 +23,99 @@ from launch.api_client import schemas # noqa: F401 -class GetFineTuneResponse(schemas.DictSchema): +class GetFineTuneResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "id", "status", } - + class properties: id = schemas.StrSchema - - class status( - schemas.ComposedSchema, + + @staticmethod + def status() -> typing.Type['BatchJobStatus']: + return BatchJobStatus + + + class fine_tuned_model( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - BatchJobStatus, - ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[None, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "status": + ) -> 'fine_tuned_model': return super().__new__( cls, *_args, _configuration=_configuration, - **kwargs, ) - - fine_tuned_model = schemas.StrSchema __annotations__ = { "id": id, "status": status, "fine_tuned_model": fine_tuned_model, } - + id: MetaOapg.properties.id - status: MetaOapg.properties.status - + status: 'BatchJobStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> MetaOapg.properties.status: - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["fine_tuned_model"]) -> MetaOapg.properties.fine_tuned_model: - ... - + def __getitem__(self, name: typing_extensions.Literal["fine_tuned_model"]) -> MetaOapg.properties.fine_tuned_model: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "status", - "fine_tuned_model", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "status", "fine_tuned_model", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> MetaOapg.properties.status: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchJobStatus': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["fine_tuned_model"] - ) -> typing.Union[MetaOapg.properties.fine_tuned_model, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["fine_tuned_model"]) -> typing.Union[MetaOapg.properties.fine_tuned_model, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "status", - "fine_tuned_model", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "status", "fine_tuned_model", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - status: typing.Union[ - MetaOapg.properties.status, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - fine_tuned_model: typing.Union[MetaOapg.properties.fine_tuned_model, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + id: typing.Union[MetaOapg.properties.id, str, ], + status: 'BatchJobStatus', + fine_tuned_model: typing.Union[MetaOapg.properties.fine_tuned_model, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFineTuneResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetFineTuneResponse': return super().__new__( cls, *_args, @@ -231,5 +126,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_fine_tune_response.pyi b/launch/api_client/model/get_fine_tune_response.pyi deleted file mode 100644 index bcf5f023..00000000 --- a/launch/api_client/model/get_fine_tune_response.pyi +++ /dev/null @@ -1,213 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - 
Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetFineTuneResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "id", - "status", - } - - class properties: - id = schemas.StrSchema - - class status( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def all_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - BatchJobStatus, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "status": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - fine_tuned_model = schemas.StrSchema - __annotations__ = { - "id": id, - "status": status, - "fine_tuned_model": fine_tuned_model, - } - id: 
MetaOapg.properties.id - status: MetaOapg.properties.status - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> MetaOapg.properties.status: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["fine_tuned_model"] - ) -> MetaOapg.properties.fine_tuned_model: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "status", - "fine_tuned_model", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> MetaOapg.properties.status: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["fine_tuned_model"] - ) -> typing.Union[MetaOapg.properties.fine_tuned_model, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "status", - "fine_tuned_model", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - status: typing.Union[ - MetaOapg.properties.status, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - fine_tuned_model: typing.Union[MetaOapg.properties.fine_tuned_model, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetFineTuneResponse": - return super().__new__( - cls, - *_args, - id=id, - status=status, - fine_tuned_model=fine_tuned_model, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/get_llm_model_endpoint_v1_response.py b/launch/api_client/model/get_llm_model_endpoint_v1_response.py index 0fd0eb0b..ee0c86e3 100644 --- a/launch/api_client/model/get_llm_model_endpoint_v1_response.py +++ b/launch/api_client/model/get_llm_model_endpoint_v1_response.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class GetLLMModelEndpointV1Response(schemas.DictSchema): +class GetLLMModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "inference_framework", @@ -39,253 +42,238 @@ class MetaOapg: "source", "status", } - + class properties: id = schemas.StrSchema - - @staticmethod - def inference_framework() -> typing.Type["LLMInferenceFramework"]: - return LLMInferenceFramework - - model_name = schemas.StrSchema name = schemas.StrSchema - + model_name = schemas.StrSchema + @staticmethod - def source() -> typing.Type["LLMSource"]: + def source() -> typing.Type['LLMSource']: return LLMSource - + @staticmethod - def status() -> typing.Type["ModelEndpointStatus"]: + def status() -> typing.Type['ModelEndpointStatus']: return ModelEndpointStatus - - checkpoint_path = schemas.StrSchema - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - + + @staticmethod + def inference_framework() -> typing.Type['LLMInferenceFramework']: + return LLMInferenceFramework + + + class inference_framework_image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'inference_framework_image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def quantize() -> typing.Type["Quantization"]: + def quantize() -> typing.Type['Quantization']: return Quantization - + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] 
= None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def spec() -> typing.Type["GetModelEndpointV1Response"]: + def spec() -> typing.Type['GetModelEndpointV1Response']: return GetModelEndpointV1Response - __annotations__ = { "id": id, - "inference_framework": inference_framework, - "model_name": model_name, "name": name, + "model_name": model_name, "source": source, "status": status, - "checkpoint_path": checkpoint_path, + "inference_framework": inference_framework, "inference_framework_image_tag": inference_framework_image_tag, "num_shards": num_shards, "quantize": quantize, + "checkpoint_path": checkpoint_path, + "chat_template_override": chat_template_override, "spec": spec, } - - inference_framework: "LLMInferenceFramework" + + inference_framework: 'LLMInferenceFramework' model_name: MetaOapg.properties.model_name name: MetaOapg.properties.name id: MetaOapg.properties.id - source: "LLMSource" - status: "ModelEndpointStatus" - + source: 'LLMSource' + status: 'ModelEndpointStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> "LLMInferenceFramework": - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> "LLMSource": - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> 'LLMInferenceFramework': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: - ... - + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> MetaOapg.properties.inference_framework_image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: - ... - + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": - ... - + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spec"]) -> "GetModelEndpointV1Response": - ... - + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "inference_framework", - "model_name", - "name", - "source", - "status", - "checkpoint_path", - "inference_framework_image_tag", - "num_shards", - "quantize", - "spec", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["spec"]) -> 'GetModelEndpointV1Response': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "model_name", "source", "status", "inference_framework", "inference_framework_image_tag", "num_shards", "quantize", "checkpoint_path", "chat_template_override", "spec", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> "LLMInferenceFramework": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> "LLMSource": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> 'LLMInferenceFramework': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union["Quantization", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["spec"] - ) -> typing.Union["GetModelEndpointV1Response", schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "inference_framework", - "model_name", - "name", - "source", - "status", - "checkpoint_path", - "inference_framework_image_tag", - "num_shards", - "quantize", - "spec", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["spec"]) -> typing.Union['GetModelEndpointV1Response', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "model_name", "source", "status", "inference_framework", "inference_framework_image_tag", "num_shards", "quantize", "checkpoint_path", "chat_template_override", "spec", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - inference_framework: "LLMInferenceFramework", - model_name: typing.Union[ - MetaOapg.properties.model_name, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - source: "LLMSource", - status: "ModelEndpointStatus", - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[ - MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset - ] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - spec: typing.Union["GetModelEndpointV1Response", 
schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + inference_framework: 'LLMInferenceFramework', + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + source: 'LLMSource', + status: 'ModelEndpointStatus', + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + spec: typing.Union['GetModelEndpointV1Response', schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetLLMModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetLLMModelEndpointV1Response': return super().__new__( cls, *_args, @@ -295,16 +283,16 @@ def __new__( id=id, source=source, status=status, - checkpoint_path=checkpoint_path, inference_framework_image_tag=inference_framework_image_tag, num_shards=num_shards, quantize=quantize, + checkpoint_path=checkpoint_path, + chat_template_override=chat_template_override, spec=spec, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.get_model_endpoint_v1_response import ( 
GetModelEndpointV1Response, ) diff --git a/launch/api_client/model/get_llm_model_endpoint_v1_response.pyi b/launch/api_client/model/get_llm_model_endpoint_v1_response.pyi deleted file mode 100644 index 91d4d626..00000000 --- a/launch/api_client/model/get_llm_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,259 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetLLMModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "inference_framework", - "model_name", - "name", - "id", - "source", - "status", - } - - class properties: - id = schemas.StrSchema - - @staticmethod - def inference_framework() -> typing.Type["LLMInferenceFramework"]: - return LLMInferenceFramework - model_name = schemas.StrSchema - name = schemas.StrSchema - - @staticmethod - def source() -> typing.Type["LLMSource"]: - return LLMSource - @staticmethod - def status() -> typing.Type["ModelEndpointStatus"]: - return ModelEndpointStatus - checkpoint_path = schemas.StrSchema - inference_framework_image_tag = schemas.StrSchema - num_shards = schemas.IntSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - @staticmethod - def spec() -> typing.Type["GetModelEndpointV1Response"]: - return GetModelEndpointV1Response - __annotations__ = { - "id": id, - "inference_framework": inference_framework, - "model_name": model_name, - "name": name, - "source": source, - "status": status, - "checkpoint_path": checkpoint_path, - "inference_framework_image_tag": inference_framework_image_tag, - "num_shards": num_shards, - "quantize": quantize, - "spec": spec, - } - inference_framework: "LLMInferenceFramework" - model_name: MetaOapg.properties.model_name - name: MetaOapg.properties.name - id: MetaOapg.properties.id - source: "LLMSource" - status: "ModelEndpointStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> "LLMInferenceFramework": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> "LLMSource": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> MetaOapg.properties.checkpoint_path: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> MetaOapg.properties.inference_framework_image_tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["spec"]) -> "GetModelEndpointV1Response": ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "inference_framework", - "model_name", - "name", - "source", - "status", - "checkpoint_path", - "inference_framework_image_tag", - "num_shards", - "quantize", - "spec", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> "LLMInferenceFramework": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> "LLMSource": ... 
- @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["quantize"] - ) -> typing.Union["Quantization", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["spec"] - ) -> typing.Union["GetModelEndpointV1Response", schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "id", - "inference_framework", - "model_name", - "name", - "source", - "status", - "checkpoint_path", - "inference_framework_image_tag", - "num_shards", - "quantize", - "spec", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - inference_framework: "LLMInferenceFramework", - model_name: typing.Union[ - MetaOapg.properties.model_name, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - source: "LLMSource", - status: "ModelEndpointStatus", - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[ - MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset - ] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - spec: typing.Union["GetModelEndpointV1Response", schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetLLMModelEndpointV1Response": - return super().__new__( - cls, - *_args, - inference_framework=inference_framework, - model_name=model_name, - name=name, - id=id, - source=source, - status=status, - checkpoint_path=checkpoint_path, - inference_framework_image_tag=inference_framework_image_tag, - num_shards=num_shards, - quantize=quantize, - spec=spec, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from 
launch_client.model.llm_inference_framework import LLMInferenceFramework -from launch_client.model.llm_source import LLMSource -from launch_client.model.model_endpoint_status import ModelEndpointStatus -from launch_client.model.quantization import Quantization diff --git a/launch/api_client/model/get_model_endpoint_v1_response.py b/launch/api_client/model/get_model_endpoint_v1_response.py index c59b193e..8d2012b2 100644 --- a/launch/api_client/model/get_model_endpoint_v1_response.py +++ b/launch/api_client/model/get_model_endpoint_v1_response.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class GetModelEndpointV1Response(schemas.DictSchema): +class GetModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "endpoint_type", @@ -42,145 +45,270 @@ class MetaOapg: "created_by", "status", } - + class properties: - bundle_name = schemas.StrSchema - created_at = schemas.DateTimeSchema - created_by = schemas.StrSchema - destination = schemas.StrSchema - - @staticmethod - def endpoint_type() -> typing.Type["ModelEndpointType"]: - return ModelEndpointType - id = schemas.StrSchema - last_updated_at = schemas.DateTimeSchema name = schemas.StrSchema - + @staticmethod - def status() -> typing.Type["ModelEndpointStatus"]: - return ModelEndpointStatus - - aws_role = schemas.StrSchema - + def endpoint_type() -> typing.Type['ModelEndpointType']: + return ModelEndpointType + destination = schemas.StrSchema + bundle_name = schemas.StrSchema + @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): + def status() -> typing.Type['ModelEndpointStatus']: + return ModelEndpointStatus + created_by = schemas.StrSchema + created_at = schemas.DateTimeSchema + last_updated_at = schemas.DateTimeSchema + + + class 
deployment_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'deployment_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: - format = "uri" - max_length = 2083 - min_length = 1 - - deployment_name = schemas.StrSchema - + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def deployment_state() -> typing.Type["ModelEndpointDeploymentState"]: - return ModelEndpointDeploymentState - - class labels(schemas.DictSchema): + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class labels( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - metadata = schemas.DictSchema - num_queued_items = schemas.IntSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - + + + class aws_role( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: 
typing.Union[None, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": + ) -> 'aws_role': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - public_inference = schemas.BoolSchema - + + + class results_s3_bucket( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'results_s3_bucket': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def resource_state() -> typing.Type["ModelEndpointResourceState"]: + def deployment_state() -> typing.Type['ModelEndpointDeploymentState']: + return ModelEndpointDeploymentState + + @staticmethod + def resource_state() -> typing.Type['ModelEndpointResourceState']: return ModelEndpointResourceState - - results_s3_bucket = schemas.StrSchema + + + class num_queued_items( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_queued_items': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "bundle_name": bundle_name, - "created_at": created_at, - "created_by": created_by, - "destination": destination, - "endpoint_type": endpoint_type, "id": id, - "last_updated_at": 
last_updated_at, "name": name, + "endpoint_type": endpoint_type, + "destination": destination, + "bundle_name": bundle_name, "status": status, - "aws_role": aws_role, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, + "created_by": created_by, + "created_at": created_at, + "last_updated_at": last_updated_at, "deployment_name": deployment_name, - "deployment_state": deployment_state, - "labels": labels, "metadata": metadata, - "num_queued_items": num_queued_items, "post_inference_hooks": post_inference_hooks, - "public_inference": public_inference, - "resource_state": resource_state, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "labels": labels, + "aws_role": aws_role, "results_s3_bucket": results_s3_bucket, + "deployment_state": deployment_state, + "resource_state": resource_state, + "num_queued_items": num_queued_items, + "public_inference": public_inference, } - - endpoint_type: "ModelEndpointType" + + endpoint_type: 'ModelEndpointType' last_updated_at: MetaOapg.properties.last_updated_at destination: MetaOapg.properties.destination name: MetaOapg.properties.name @@ -188,353 +316,176 @@ def resource_state() -> typing.Type["ModelEndpointResourceState"]: bundle_name: MetaOapg.properties.bundle_name id: MetaOapg.properties.id created_by: MetaOapg.properties.created_by - status: "ModelEndpointStatus" - + status: 'ModelEndpointStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: - ... - + def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: - ... - + def __getitem__(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": - ... - + def __getitem__(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["last_updated_at"]) -> MetaOapg.properties.last_updated_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["last_updated_at"]) -> MetaOapg.properties.last_updated_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["aws_role"]) -> MetaOapg.properties.aws_role: - ... - + def __getitem__(self, name: typing_extensions.Literal["deployment_name"]) -> MetaOapg.properties.deployment_name: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: - ... - + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deployment_name"]) -> MetaOapg.properties.deployment_name: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deployment_state"]) -> "ModelEndpointDeploymentState": - ... - + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["aws_role"]) -> MetaOapg.properties.aws_role: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_queued_items"]) -> MetaOapg.properties.num_queued_items: - ... - + def __getitem__(self, name: typing_extensions.Literal["results_s3_bucket"]) -> MetaOapg.properties.results_s3_bucket: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["deployment_state"]) -> 'ModelEndpointDeploymentState': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: - ... - + def __getitem__(self, name: typing_extensions.Literal["resource_state"]) -> 'ModelEndpointResourceState': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_state"]) -> "ModelEndpointResourceState": - ... - + def __getitem__(self, name: typing_extensions.Literal["num_queued_items"]) -> MetaOapg.properties.num_queued_items: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["results_s3_bucket"] - ) -> MetaOapg.properties.results_s3_bucket: - ... - + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_name", - "created_at", - "created_by", - "destination", - "endpoint_type", - "id", - "last_updated_at", - "name", - "status", - "aws_role", - "default_callback_auth", - "default_callback_url", - "deployment_name", - "deployment_state", - "labels", - "metadata", - "num_queued_items", - "post_inference_hooks", - "public_inference", - "resource_state", - "results_s3_bucket", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "endpoint_type", "destination", "bundle_name", "status", "created_by", "created_at", "last_updated_at", "deployment_name", "metadata", "post_inference_hooks", "default_callback_url", "default_callback_auth", "labels", "aws_role", "results_s3_bucket", "deployment_state", "resource_state", "num_queued_items", "public_inference", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> 'ModelEndpointType': ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'ModelEndpointStatus': ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["last_updated_at"]) -> MetaOapg.properties.last_updated_at: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["last_updated_at"]) -> MetaOapg.properties.last_updated_at: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["aws_role"] - ) -> typing.Union[MetaOapg.properties.aws_role, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["deployment_name"]) -> typing.Union[MetaOapg.properties.deployment_name, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["deployment_name"] - ) -> typing.Union[MetaOapg.properties.deployment_name, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["deployment_state"] - ) -> typing.Union["ModelEndpointDeploymentState", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["labels"] - ) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["aws_role"]) -> typing.Union[MetaOapg.properties.aws_role, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_queued_items"] - ) -> typing.Union[MetaOapg.properties.num_queued_items, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["results_s3_bucket"]) -> typing.Union[MetaOapg.properties.results_s3_bucket, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["deployment_state"]) -> typing.Union['ModelEndpointDeploymentState', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["resource_state"]) -> typing.Union['ModelEndpointResourceState', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_state"] - ) -> typing.Union["ModelEndpointResourceState", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["num_queued_items"]) -> typing.Union[MetaOapg.properties.num_queued_items, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["results_s3_bucket"] - ) -> typing.Union[MetaOapg.properties.results_s3_bucket, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_name", - "created_at", - "created_by", - "destination", - "endpoint_type", - "id", - "last_updated_at", - "name", - "status", - "aws_role", - "default_callback_auth", - "default_callback_url", - "deployment_name", - "deployment_state", - "labels", - "metadata", - "num_queued_items", - "post_inference_hooks", - "public_inference", - "resource_state", - "results_s3_bucket", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "endpoint_type", "destination", "bundle_name", "status", "created_by", "created_at", "last_updated_at", "deployment_name", "metadata", "post_inference_hooks", "default_callback_url", "default_callback_auth", "labels", "aws_role", "results_s3_bucket", "deployment_state", "resource_state", "num_queued_items", "public_inference", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_type: "ModelEndpointType", - last_updated_at: typing.Union[ - MetaOapg.properties.last_updated_at, - str, - datetime, - ], - destination: typing.Union[ - MetaOapg.properties.destination, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - bundle_name: typing.Union[ - MetaOapg.properties.bundle_name, - str, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - created_by: typing.Union[ - MetaOapg.properties.created_by, - str, - ], - status: "ModelEndpointStatus", - aws_role: typing.Union[MetaOapg.properties.aws_role, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - deployment_name: typing.Union[MetaOapg.properties.deployment_name, str, schemas.Unset] = schemas.unset, - deployment_state: typing.Union["ModelEndpointDeploymentState", schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - num_queued_items: typing.Union[ - MetaOapg.properties.num_queued_items, decimal.Decimal, int, schemas.Unset - ] = 
schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - resource_state: typing.Union["ModelEndpointResourceState", schemas.Unset] = schemas.unset, - results_s3_bucket: typing.Union[MetaOapg.properties.results_s3_bucket, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + endpoint_type: 'ModelEndpointType', + last_updated_at: typing.Union[MetaOapg.properties.last_updated_at, str, datetime, ], + destination: typing.Union[MetaOapg.properties.destination, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], + bundle_name: typing.Union[MetaOapg.properties.bundle_name, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + created_by: typing.Union[MetaOapg.properties.created_by, str, ], + status: 'ModelEndpointStatus', + deployment_name: typing.Union[MetaOapg.properties.deployment_name, None, str, schemas.Unset] = schemas.unset, + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + aws_role: typing.Union[MetaOapg.properties.aws_role, None, str, schemas.Unset] = schemas.unset, + results_s3_bucket: typing.Union[MetaOapg.properties.results_s3_bucket, None, str, schemas.Unset] = schemas.unset, + deployment_state: 
typing.Union['ModelEndpointDeploymentState', schemas.Unset] = schemas.unset, + resource_state: typing.Union['ModelEndpointResourceState', schemas.Unset] = schemas.unset, + num_queued_items: typing.Union[MetaOapg.properties.num_queued_items, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetModelEndpointV1Response': return super().__new__( cls, *_args, @@ -547,23 +498,22 @@ def __new__( id=id, created_by=created_by, status=status, - aws_role=aws_role, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, deployment_name=deployment_name, - deployment_state=deployment_state, - labels=labels, metadata=metadata, - num_queued_items=num_queued_items, post_inference_hooks=post_inference_hooks, - public_inference=public_inference, - resource_state=resource_state, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + labels=labels, + aws_role=aws_role, results_s3_bucket=results_s3_bucket, + deployment_state=deployment_state, + resource_state=resource_state, + num_queued_items=num_queued_items, + public_inference=public_inference, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.callback_auth import CallbackAuth from launch.api_client.model.model_endpoint_deployment_state import ( ModelEndpointDeploymentState, diff --git a/launch/api_client/model/get_model_endpoint_v1_response.pyi 
b/launch/api_client/model/get_model_endpoint_v1_response.pyi deleted file mode 100644 index e5790416..00000000 --- a/launch/api_client/model/get_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,477 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "endpoint_type", - "last_updated_at", - "destination", - "name", - "created_at", - "bundle_name", - "id", - "created_by", - "status", - } - - class properties: - bundle_name = schemas.StrSchema - created_at = schemas.DateTimeSchema - created_by = schemas.StrSchema - destination = schemas.StrSchema - - @staticmethod - def endpoint_type() -> typing.Type["ModelEndpointType"]: - return ModelEndpointType - id = schemas.StrSchema - last_updated_at = schemas.DateTimeSchema - name = schemas.StrSchema - - @staticmethod - def status() -> typing.Type["ModelEndpointStatus"]: - return ModelEndpointStatus - aws_role = schemas.StrSchema - - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - pass - deployment_name = schemas.StrSchema - - @staticmethod - def deployment_state() -> typing.Type["ModelEndpointDeploymentState"]: - return ModelEndpointDeploymentState - - 
class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - metadata = schemas.DictSchema - num_queued_items = schemas.IntSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - public_inference = schemas.BoolSchema - - @staticmethod - def resource_state() -> typing.Type["ModelEndpointResourceState"]: - return ModelEndpointResourceState - results_s3_bucket = schemas.StrSchema - __annotations__ = { - "bundle_name": bundle_name, - "created_at": created_at, - "created_by": created_by, - "destination": destination, - "endpoint_type": endpoint_type, - "id": id, - "last_updated_at": last_updated_at, - "name": name, - "status": status, - "aws_role": aws_role, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "deployment_name": deployment_name, - "deployment_state": deployment_state, - "labels": 
labels, - "metadata": metadata, - "num_queued_items": num_queued_items, - "post_inference_hooks": post_inference_hooks, - "public_inference": public_inference, - "resource_state": resource_state, - "results_s3_bucket": results_s3_bucket, - } - endpoint_type: "ModelEndpointType" - last_updated_at: MetaOapg.properties.last_updated_at - destination: MetaOapg.properties.destination - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - bundle_name: MetaOapg.properties.bundle_name - id: MetaOapg.properties.id - created_by: MetaOapg.properties.created_by - status: "ModelEndpointStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["last_updated_at"] - ) -> MetaOapg.properties.last_updated_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["aws_role"]) -> MetaOapg.properties.aws_role: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": ... 
- @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["deployment_name"] - ) -> MetaOapg.properties.deployment_name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deployment_state"]) -> "ModelEndpointDeploymentState": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["num_queued_items"] - ) -> MetaOapg.properties.num_queued_items: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["public_inference"] - ) -> MetaOapg.properties.public_inference: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["resource_state"]) -> "ModelEndpointResourceState": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["results_s3_bucket"] - ) -> MetaOapg.properties.results_s3_bucket: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_name", - "created_at", - "created_by", - "destination", - "endpoint_type", - "id", - "last_updated_at", - "name", - "status", - "aws_role", - "default_callback_auth", - "default_callback_url", - "deployment_name", - "deployment_state", - "labels", - "metadata", - "num_queued_items", - "post_inference_hooks", - "public_inference", - "resource_state", - "results_s3_bucket", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["bundle_name"]) -> MetaOapg.properties.bundle_name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["destination"]) -> MetaOapg.properties.destination: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["endpoint_type"]) -> "ModelEndpointType": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["last_updated_at"] - ) -> MetaOapg.properties.last_updated_at: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "ModelEndpointStatus": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["aws_role"] - ) -> typing.Union[MetaOapg.properties.aws_role, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["deployment_name"] - ) -> typing.Union[MetaOapg.properties.deployment_name, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["deployment_state"] - ) -> typing.Union["ModelEndpointDeploymentState", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["labels"] - ) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_queued_items"] - ) -> typing.Union[MetaOapg.properties.num_queued_items, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["resource_state"] - ) -> typing.Union["ModelEndpointResourceState", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["results_s3_bucket"] - ) -> typing.Union[MetaOapg.properties.results_s3_bucket, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "bundle_name", - "created_at", - "created_by", - "destination", - "endpoint_type", - "id", - "last_updated_at", - "name", - "status", - "aws_role", - "default_callback_auth", - "default_callback_url", - "deployment_name", - "deployment_state", - "labels", - "metadata", - "num_queued_items", - "post_inference_hooks", - "public_inference", - "resource_state", - "results_s3_bucket", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_type: "ModelEndpointType", - last_updated_at: typing.Union[ - MetaOapg.properties.last_updated_at, - str, - datetime, - ], - destination: typing.Union[ - MetaOapg.properties.destination, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - bundle_name: typing.Union[ - MetaOapg.properties.bundle_name, - str, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - created_by: typing.Union[ - MetaOapg.properties.created_by, - str, - ], - status: "ModelEndpointStatus", - aws_role: typing.Union[MetaOapg.properties.aws_role, str, schemas.Unset] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - deployment_name: typing.Union[MetaOapg.properties.deployment_name, str, schemas.Unset] = schemas.unset, - deployment_state: typing.Union["ModelEndpointDeploymentState", schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, 
dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - num_queued_items: typing.Union[ - MetaOapg.properties.num_queued_items, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - resource_state: typing.Union["ModelEndpointResourceState", schemas.Unset] = schemas.unset, - results_s3_bucket: typing.Union[MetaOapg.properties.results_s3_bucket, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetModelEndpointV1Response": - return super().__new__( - cls, - *_args, - endpoint_type=endpoint_type, - last_updated_at=last_updated_at, - destination=destination, - name=name, - created_at=created_at, - bundle_name=bundle_name, - id=id, - created_by=created_by, - status=status, - aws_role=aws_role, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - deployment_name=deployment_name, - deployment_state=deployment_state, - labels=labels, - metadata=metadata, - num_queued_items=num_queued_items, - post_inference_hooks=post_inference_hooks, - public_inference=public_inference, - resource_state=resource_state, - results_s3_bucket=results_s3_bucket, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.callback_auth import CallbackAuth -from launch_client.model.model_endpoint_deployment_state import ( - ModelEndpointDeploymentState, -) -from launch_client.model.model_endpoint_resource_state import ( - ModelEndpointResourceState, -) -from launch_client.model.model_endpoint_status import ModelEndpointStatus -from 
launch_client.model.model_endpoint_type import ModelEndpointType diff --git a/launch/api_client/model/get_trigger_v1_response.py b/launch/api_client/model/get_trigger_v1_response.py index 3cec33f2..9905aeaf 100644 --- a/launch/api_client/model/get_trigger_v1_response.py +++ b/launch/api_client/model/get_trigger_v1_response.py @@ -23,13 +23,16 @@ from launch.api_client import schemas # noqa: F401 -class GetTriggerV1Response(schemas.DictSchema): +class GetTriggerV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "owner", @@ -40,65 +43,93 @@ class MetaOapg: "id", "created_by", } - + class properties: - created_at = schemas.DateTimeSchema - created_by = schemas.StrSchema - cron_schedule = schemas.StrSchema - docker_image_batch_job_bundle_id = schemas.StrSchema id = schemas.StrSchema name = schemas.StrSchema owner = schemas.StrSchema - default_job_config = schemas.DictSchema - - class default_job_metadata(schemas.DictSchema): + created_by = schemas.StrSchema + created_at = schemas.DateTimeSchema + cron_schedule = schemas.StrSchema + docker_image_batch_job_bundle_id = schemas.StrSchema + + + class default_job_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, 
float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'default_job_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_job_metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "default_job_metadata": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'default_job_metadata': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - __annotations__ = { - "created_at": created_at, - "created_by": created_by, - "cron_schedule": cron_schedule, - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, "id": id, "name": name, "owner": owner, + "created_by": created_by, + "created_at": created_at, + "cron_schedule": cron_schedule, + "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, "default_job_config": default_job_config, "default_job_metadata": default_job_metadata, } - + owner: MetaOapg.properties.owner cron_schedule: MetaOapg.properties.cron_schedule docker_image_batch_job_bundle_id: 
MetaOapg.properties.docker_image_batch_job_bundle_id @@ -106,197 +137,91 @@ def __new__( created_at: MetaOapg.properties.created_at id: MetaOapg.properties.id created_by: MetaOapg.properties.created_by - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: - ... - + def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: - ... - + def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... 
+ @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_config"] - ) -> MetaOapg.properties.default_job_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_job_config"]) -> MetaOapg.properties.default_job_config: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> MetaOapg.properties.default_job_metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_job_metadata"]) -> MetaOapg.properties.default_job_metadata: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "cron_schedule", - "docker_image_batch_job_bundle_id", - "id", - "name", - "owner", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "owner", "created_by", "created_at", "cron_schedule", "docker_image_batch_job_bundle_id", "default_job_config", "default_job_metadata", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"]) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_config"] - ) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_job_config"]) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_job_metadata"]) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "cron_schedule", - "docker_image_batch_job_bundle_id", - "id", - "name", - "owner", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "owner", "created_by", "created_at", "cron_schedule", "docker_image_batch_job_bundle_id", "default_job_config", "default_job_metadata", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - owner: typing.Union[ - MetaOapg.properties.owner, - str, - ], - cron_schedule: typing.Union[ - MetaOapg.properties.cron_schedule, - str, - ], - docker_image_batch_job_bundle_id: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_id, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - created_by: typing.Union[ - MetaOapg.properties.created_by, - str, - ], - default_job_config: typing.Union[ - MetaOapg.properties.default_job_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - default_job_metadata: typing.Union[ - MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + owner: typing.Union[MetaOapg.properties.owner, str, ], + cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, str, ], + docker_image_batch_job_bundle_id: typing.Union[MetaOapg.properties.docker_image_batch_job_bundle_id, str, ], + name: typing.Union[MetaOapg.properties.name, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], + id: 
typing.Union[MetaOapg.properties.id, str, ], + created_by: typing.Union[MetaOapg.properties.created_by, str, ], + default_job_config: typing.Union[MetaOapg.properties.default_job_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_job_metadata: typing.Union[MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetTriggerV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'GetTriggerV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/get_trigger_v1_response.pyi b/launch/api_client/model/get_trigger_v1_response.pyi deleted file mode 100644 index 7d282683..00000000 --- a/launch/api_client/model/get_trigger_v1_response.pyi +++ /dev/null @@ -1,265 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GetTriggerV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "owner", - "cron_schedule", - "docker_image_batch_job_bundle_id", - "name", - "created_at", - "id", - "created_by", - } - - class properties: - created_at = schemas.DateTimeSchema - created_by = schemas.StrSchema - cron_schedule = schemas.StrSchema - docker_image_batch_job_bundle_id = schemas.StrSchema - id = schemas.StrSchema - name = schemas.StrSchema - owner = schemas.StrSchema - default_job_config = schemas.DictSchema - - class default_job_metadata(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "default_job_metadata": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "created_at": created_at, - "created_by": created_by, - "cron_schedule": cron_schedule, - "docker_image_batch_job_bundle_id": docker_image_batch_job_bundle_id, - "id": id, - "name": name, - "owner": owner, - "default_job_config": default_job_config, - "default_job_metadata": default_job_metadata, - } - owner: MetaOapg.properties.owner - cron_schedule: MetaOapg.properties.cron_schedule - docker_image_batch_job_bundle_id: MetaOapg.properties.docker_image_batch_job_bundle_id - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - created_by: MetaOapg.properties.created_by - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> 
MetaOapg.properties.created_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_config"] - ) -> MetaOapg.properties.default_job_config: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> MetaOapg.properties.default_job_metadata: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "cron_schedule", - "docker_image_batch_job_bundle_id", - "id", - "name", - "owner", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_by"]) -> MetaOapg.properties.created_by: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundle_id"] - ) -> MetaOapg.properties.docker_image_batch_job_bundle_id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["owner"]) -> MetaOapg.properties.owner: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_config"] - ) -> typing.Union[MetaOapg.properties.default_job_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_job_metadata"] - ) -> typing.Union[MetaOapg.properties.default_job_metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "created_by", - "cron_schedule", - "docker_image_batch_job_bundle_id", - "id", - "name", - "owner", - "default_job_config", - "default_job_metadata", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - owner: typing.Union[ - MetaOapg.properties.owner, - str, - ], - cron_schedule: typing.Union[ - MetaOapg.properties.cron_schedule, - str, - ], - docker_image_batch_job_bundle_id: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundle_id, - str, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - created_by: typing.Union[ - MetaOapg.properties.created_by, - str, - ], - default_job_config: typing.Union[ - MetaOapg.properties.default_job_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - default_job_metadata: typing.Union[ - MetaOapg.properties.default_job_metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetTriggerV1Response": - return super().__new__( - cls, - *_args, - owner=owner, - cron_schedule=cron_schedule, - docker_image_batch_job_bundle_id=docker_image_batch_job_bundle_id, - name=name, - created_at=created_at, - id=id, - created_by=created_by, - default_job_config=default_job_config, - default_job_metadata=default_job_metadata, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/gpu_type.py b/launch/api_client/model/gpu_type.py index 1e246b17..599f505f 100644 
--- a/launch/api_client/model/gpu_type.py +++ b/launch/api_client/model/gpu_type.py @@ -23,7 +23,10 @@ from launch.api_client import schemas # noqa: F401 -class GpuType(schemas.EnumBase, schemas.StrSchema): +class GpuType( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +35,7 @@ class GpuType(schemas.EnumBase, schemas.StrSchema): Lists allowed GPU types for Launch. """ + class MetaOapg: enum_value_to_name = { "nvidia-tesla-t4": "TESLAT4", @@ -42,31 +46,31 @@ class MetaOapg: "nvidia-hopper-h100-1g20gb": "HOPPERH1001G20GB", "nvidia-hopper-h100-3g40gb": "HOPPERH1003G40GB", } - + @schemas.classproperty def TESLAT4(cls): return cls("nvidia-tesla-t4") - + @schemas.classproperty def AMPEREA10(cls): return cls("nvidia-ampere-a10") - + @schemas.classproperty def AMPEREA100(cls): return cls("nvidia-ampere-a100") - + @schemas.classproperty def AMPEREA100E(cls): return cls("nvidia-ampere-a100e") - + @schemas.classproperty def HOPPERH100(cls): return cls("nvidia-hopper-h100") - + @schemas.classproperty def HOPPERH1001G20GB(cls): return cls("nvidia-hopper-h100-1g20gb") - + @schemas.classproperty def HOPPERH1003G40GB(cls): return cls("nvidia-hopper-h100-3g40gb") diff --git a/launch/api_client/model/gpu_type.pyi b/launch/api_client/model/gpu_type.pyi deleted file mode 100644 index 7e628d5a..00000000 --- a/launch/api_client/model/gpu_type.pyi +++ /dev/null @@ -1,53 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import 
typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class GpuType(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Lists allowed GPU types for Launch. - """ - - @schemas.classproperty - def TESLAT4(cls): - return cls("nvidia-tesla-t4") - @schemas.classproperty - def AMPEREA10(cls): - return cls("nvidia-ampere-a10") - @schemas.classproperty - def AMPEREA100(cls): - return cls("nvidia-ampere-a100") - @schemas.classproperty - def AMPEREA100E(cls): - return cls("nvidia-ampere-a100e") - @schemas.classproperty - def HOPPERH100(cls): - return cls("nvidia-hopper-h100") - @schemas.classproperty - def HOPPERH1001G20GB(cls): - return cls("nvidia-hopper-h100-1g20gb") - @schemas.classproperty - def HOPPERH1003G40GB(cls): - return cls("nvidia-hopper-h100-3g40gb") diff --git a/launch/api_client/model/http_validation_error.py b/launch/api_client/model/http_validation_error.py index 6206ad17..af88fa1b 100644 --- a/launch/api_client/model/http_validation_error.py +++ b/launch/api_client/model/http_validation_error.py @@ -23,101 +23,77 @@ from launch.api_client import schemas # noqa: F401 -class HTTPValidationError(schemas.DictSchema): +class HTTPValidationError( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: + class properties: - class detail(schemas.ListSchema): + + + class detail( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["ValidationError"]: + def items() -> typing.Type['ValidationError']: return ValidationError - + def __new__( cls, - _arg: typing.Union[typing.Tuple["ValidationError"], typing.List["ValidationError"]], + _arg: typing.Union[typing.Tuple['ValidationError'], typing.List['ValidationError']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "detail": + ) -> 'detail': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "ValidationError": + + def __getitem__(self, i: int) -> 'ValidationError': return super().__getitem__(i) - __annotations__ = { "detail": detail, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["detail"]) -> MetaOapg.properties.detail: - ... - + def __getitem__(self, name: typing_extensions.Literal["detail"]) -> MetaOapg.properties.detail: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["detail",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["detail", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["detail"] - ) -> typing.Union[MetaOapg.properties.detail, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["detail"]) -> typing.Union[MetaOapg.properties.detail, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["detail",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["detail", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], detail: typing.Union[MetaOapg.properties.detail, list, tuple, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "HTTPValidationError": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'HTTPValidationError': return super().__new__( cls, *_args, @@ -126,5 +102,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.validation_error import ValidationError diff --git a/launch/api_client/model/http_validation_error.pyi b/launch/api_client/model/http_validation_error.pyi deleted file mode 100644 index 3a172fce..00000000 --- a/launch/api_client/model/http_validation_error.pyi +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: 
F401 -from launch_client import schemas # noqa: F401 - -class HTTPValidationError(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - class detail(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["ValidationError"]: - return ValidationError - def __new__( - cls, - _arg: typing.Union[typing.Tuple["ValidationError"], typing.List["ValidationError"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "detail": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "ValidationError": - return super().__getitem__(i) - __annotations__ = { - "detail": detail, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["detail"]) -> MetaOapg.properties.detail: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["detail",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["detail"] - ) -> typing.Union[MetaOapg.properties.detail, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["detail",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - detail: typing.Union[MetaOapg.properties.detail, list, tuple, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "HTTPValidationError": - return super().__new__( - cls, - *_args, - detail=detail, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.validation_error import ValidationError diff --git a/launch/api_client/model/image_url.py b/launch/api_client/model/image_url.py new file mode 100644 index 00000000..b5c1f45c --- /dev/null +++ b/launch/api_client/model/image_url.py @@ -0,0 +1,128 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ImageUrl( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "url", + } + + class properties: + + + class url( + schemas.StrSchema + ): + + + class MetaOapg: + format = 'uri' + max_length = 65536 + min_length = 1 + + + class detail( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "auto": "AUTO", + "low": "LOW", + "high": "HIGH", + } + + @schemas.classproperty + def AUTO(cls): + return cls("auto") + + @schemas.classproperty + def LOW(cls): + return cls("low") + + @schemas.classproperty + def HIGH(cls): + return cls("high") + __annotations__ = { + "url": url, + "detail": detail, + } + + url: MetaOapg.properties.url + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["detail"]) -> MetaOapg.properties.detail: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["url", "detail", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["detail"]) -> typing.Union[MetaOapg.properties.detail, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["url", "detail", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + url: typing.Union[MetaOapg.properties.url, str, ], + detail: typing.Union[MetaOapg.properties.detail, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ImageUrl': + return super().__new__( + cls, + *_args, + url=url, + detail=detail, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/input_audio.py b/launch/api_client/model/input_audio.py new file mode 100644 index 00000000..86835ecc --- /dev/null +++ b/launch/api_client/model/input_audio.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class InputAudio( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "data", + "format", + } + + class properties: + data = schemas.StrSchema + + + class format( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "wav": "WAV", + "mp3": "MP3", + } + + @schemas.classproperty + def WAV(cls): + return cls("wav") + + @schemas.classproperty + def MP3(cls): + return cls("mp3") + __annotations__ = { + "data": data, + "format": format, + } + + data: MetaOapg.properties.data + format: MetaOapg.properties.format + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["data", "format", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["data"]) -> MetaOapg.properties.data: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["format"]) -> MetaOapg.properties.format: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["data", "format", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + data: typing.Union[MetaOapg.properties.data, str, ], + format: typing.Union[MetaOapg.properties.format, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'InputAudio': + return super().__new__( + cls, + *_args, + data=data, + format=format, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/json_schema.py b/launch/api_client/model/json_schema.py new file mode 100644 index 00000000..092891e8 --- /dev/null +++ b/launch/api_client/model/json_schema.py @@ -0,0 +1,158 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class JsonSchema( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "name", + } + + class properties: + name = schemas.StrSchema + + + class description( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'description': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def schema() -> typing.Type['ResponseFormatJsonSchemaSchema']: + return ResponseFormatJsonSchemaSchema + + + class strict( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'strict': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "name": name, + "description": description, + "schema": schema, + "strict": strict, + } + + name: MetaOapg.properties.name + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["description"]) -> MetaOapg.properties.description: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["schema"]) -> 'ResponseFormatJsonSchemaSchema': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["strict"]) -> MetaOapg.properties.strict: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "description", "schema", "strict", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["description"]) -> typing.Union[MetaOapg.properties.description, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["schema"]) -> typing.Union['ResponseFormatJsonSchemaSchema', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["strict"]) -> typing.Union[MetaOapg.properties.strict, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "description", "schema", "strict", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + description: typing.Union[MetaOapg.properties.description, None, str, schemas.Unset] = schemas.unset, + schema: typing.Union['ResponseFormatJsonSchemaSchema', schemas.Unset] = schemas.unset, + strict: typing.Union[MetaOapg.properties.strict, None, bool, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'JsonSchema': + return super().__new__( + cls, + *_args, + name=name, + description=description, + schema=schema, + strict=strict, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.response_format_json_schema_schema import ( + ResponseFormatJsonSchemaSchema, +) diff --git a/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py index 484deaff..b4191e33 100644 --- a/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py +++ 
b/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.py @@ -23,116 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class ListDockerImageBatchJobBundleV1Response(schemas.DictSchema): +class ListDockerImageBatchJobBundleV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "docker_image_batch_job_bundles", } - + class properties: - class docker_image_batch_job_bundles(schemas.ListSchema): + + + class docker_image_batch_job_bundles( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["DockerImageBatchJobBundleV1Response"]: + def items() -> typing.Type['DockerImageBatchJobBundleV1Response']: return DockerImageBatchJobBundleV1Response - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple["DockerImageBatchJobBundleV1Response"], - typing.List["DockerImageBatchJobBundleV1Response"], - ], + _arg: typing.Union[typing.Tuple['DockerImageBatchJobBundleV1Response'], typing.List['DockerImageBatchJobBundleV1Response']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "docker_image_batch_job_bundles": + ) -> 'docker_image_batch_job_bundles': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "DockerImageBatchJobBundleV1Response": + + def __getitem__(self, i: int) -> 'DockerImageBatchJobBundleV1Response': return super().__getitem__(i) - __annotations__ = { "docker_image_batch_job_bundles": docker_image_batch_job_bundles, } - + docker_image_batch_job_bundles: MetaOapg.properties.docker_image_batch_job_bundles - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundles"] - ) -> MetaOapg.properties.docker_image_batch_job_bundles: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["docker_image_batch_job_bundles"]) -> MetaOapg.properties.docker_image_batch_job_bundles: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundles",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundles", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundles"] - ) -> MetaOapg.properties.docker_image_batch_job_bundles: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["docker_image_batch_job_bundles"]) -> MetaOapg.properties.docker_image_batch_job_bundles: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundles",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["docker_image_batch_job_bundles", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - docker_image_batch_job_bundles: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundles, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + docker_image_batch_job_bundles: typing.Union[MetaOapg.properties.docker_image_batch_job_bundles, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListDockerImageBatchJobBundleV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListDockerImageBatchJobBundleV1Response': return super().__new__( cls, *_args, @@ -141,7 +107,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( DockerImageBatchJobBundleV1Response, ) diff --git a/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.pyi b/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.pyi deleted file mode 100644 index 6773fc22..00000000 --- a/launch/api_client/model/list_docker_image_batch_job_bundle_v1_response.pyi +++ /dev/null @@ -1,130 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: 
F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListDockerImageBatchJobBundleV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "docker_image_batch_job_bundles", - } - - class properties: - class docker_image_batch_job_bundles(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["DockerImageBatchJobBundleV1Response"]: - return DockerImageBatchJobBundleV1Response - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple["DockerImageBatchJobBundleV1Response"], - typing.List["DockerImageBatchJobBundleV1Response"], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "docker_image_batch_job_bundles": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "DockerImageBatchJobBundleV1Response": - return super().__getitem__(i) - __annotations__ = { - "docker_image_batch_job_bundles": docker_image_batch_job_bundles, - } - docker_image_batch_job_bundles: MetaOapg.properties.docker_image_batch_job_bundles - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["docker_image_batch_job_bundles"] - ) -> MetaOapg.properties.docker_image_batch_job_bundles: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundles",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["docker_image_batch_job_bundles"] - ) -> MetaOapg.properties.docker_image_batch_job_bundles: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["docker_image_batch_job_bundles",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - docker_image_batch_job_bundles: typing.Union[ - MetaOapg.properties.docker_image_batch_job_bundles, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListDockerImageBatchJobBundleV1Response": - return super().__new__( - cls, - *_args, - docker_image_batch_job_bundles=docker_image_batch_job_bundles, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) diff --git a/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py b/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py index f3985644..1213f5a8 100644 --- a/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py +++ b/launch/api_client/model/list_docker_image_batch_jobs_v1_response.py @@ -23,109 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class ListDockerImageBatchJobsV1Response(schemas.DictSchema): +class ListDockerImageBatchJobsV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "jobs", } - + class properties: - class jobs(schemas.ListSchema): + + + class jobs( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["DockerImageBatchJob"]: + def items() -> typing.Type['DockerImageBatchJob']: return DockerImageBatchJob - + def __new__( cls, - _arg: typing.Union[typing.Tuple["DockerImageBatchJob"], typing.List["DockerImageBatchJob"]], + _arg: typing.Union[typing.Tuple['DockerImageBatchJob'], typing.List['DockerImageBatchJob']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "jobs": + ) -> 'jobs': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "DockerImageBatchJob": + + def __getitem__(self, i: int) -> 'DockerImageBatchJob': return super().__getitem__(i) - __annotations__ = { "jobs": jobs, } - + jobs: MetaOapg.properties.jobs - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: - ... - + def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - jobs: typing.Union[ - MetaOapg.properties.jobs, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + jobs: typing.Union[MetaOapg.properties.jobs, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListDockerImageBatchJobsV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListDockerImageBatchJobsV1Response': return super().__new__( cls, *_args, @@ -134,5 +107,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.docker_image_batch_job import DockerImageBatchJob diff --git a/launch/api_client/model/list_docker_image_batch_jobs_v1_response.pyi b/launch/api_client/model/list_docker_image_batch_jobs_v1_response.pyi deleted file mode 100644 index e62db2ff..00000000 --- a/launch/api_client/model/list_docker_image_batch_jobs_v1_response.pyi +++ /dev/null @@ -1,121 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import 
uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListDockerImageBatchJobsV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "jobs", - } - - class properties: - class jobs(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["DockerImageBatchJob"]: - return DockerImageBatchJob - def __new__( - cls, - _arg: typing.Union[typing.Tuple["DockerImageBatchJob"], typing.List["DockerImageBatchJob"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "jobs": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "DockerImageBatchJob": - return super().__getitem__(i) - __annotations__ = { - "jobs": jobs, - } - jobs: MetaOapg.properties.jobs - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - jobs: typing.Union[ - MetaOapg.properties.jobs, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListDockerImageBatchJobsV1Response": - return super().__new__( - cls, - *_args, - jobs=jobs, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.docker_image_batch_job import DockerImageBatchJob diff --git a/launch/api_client/model/list_files_response.py b/launch/api_client/model/list_files_response.py index 5490fa3b..8af8b13a 100644 --- a/launch/api_client/model/list_files_response.py +++ b/launch/api_client/model/list_files_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ListFilesResponse(schemas.DictSchema): +class ListFilesResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,102 +34,73 @@ class ListFilesResponse(schemas.DictSchema): Response object for listing files. 
""" + class MetaOapg: required = { "files", } - + class properties: - class files(schemas.ListSchema): + + + class files( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["GetFileResponse"]: + def items() -> typing.Type['GetFileResponse']: return GetFileResponse - + def __new__( cls, - _arg: typing.Union[typing.Tuple["GetFileResponse"], typing.List["GetFileResponse"]], + _arg: typing.Union[typing.Tuple['GetFileResponse'], typing.List['GetFileResponse']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "files": + ) -> 'files': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "GetFileResponse": + + def __getitem__(self, i: int) -> 'GetFileResponse': return super().__getitem__(i) - __annotations__ = { "files": files, } - + files: MetaOapg.properties.files - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: - ... - + def __getitem__(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["files",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["files", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["files",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["files", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - files: typing.Union[ - MetaOapg.properties.files, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + files: typing.Union[MetaOapg.properties.files, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListFilesResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListFilesResponse': return super().__new__( cls, *_args, @@ -136,5 +109,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.get_file_response import GetFileResponse diff --git a/launch/api_client/model/list_files_response.pyi b/launch/api_client/model/list_files_response.pyi deleted file mode 100644 index 606223de..00000000 --- a/launch/api_client/model/list_files_response.pyi +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 
-import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListFilesResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for listing files. - """ - - class MetaOapg: - required = { - "files", - } - - class properties: - class files(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetFileResponse"]: - return GetFileResponse - def __new__( - cls, - _arg: typing.Union[typing.Tuple["GetFileResponse"], typing.List["GetFileResponse"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "files": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "GetFileResponse": - return super().__getitem__(i) - __annotations__ = { - "files": files, - } - files: MetaOapg.properties.files - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["files",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["files"]) -> MetaOapg.properties.files: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["files",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - files: typing.Union[ - MetaOapg.properties.files, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListFilesResponse": - return super().__new__( - cls, - *_args, - files=files, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_file_response import GetFileResponse diff --git a/launch/api_client/model/list_fine_tune_job_response.py b/launch/api_client/model/list_fine_tune_job_response.py index d4f7d5a7..fa6b2afe 100644 --- a/launch/api_client/model/list_fine_tune_job_response.py +++ b/launch/api_client/model/list_fine_tune_job_response.py @@ -76,7 +76,9 @@ def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: def __getitem__( self, name: typing.Union[ - typing_extensions.Literal["jobs",], + typing_extensions.Literal[ + "jobs", + ], str, ], ): @@ -94,7 +96,9 @@ def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, s def get_item_oapg( self, name: typing.Union[ - typing_extensions.Literal["jobs",], + typing_extensions.Literal[ + "jobs", + ], str, ], ): diff --git a/launch/api_client/model/list_fine_tune_job_response.pyi b/launch/api_client/model/list_fine_tune_job_response.pyi deleted file mode 100644 index 5bfce4fe..00000000 --- a/launch/api_client/model/list_fine_tune_job_response.pyi +++ /dev/null @@ -1,124 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: 
https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListFineTunesResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "jobs", - } - - class properties: - class jobs(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetFineTuneResponse"]: - return GetFineTuneResponse - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple["GetFineTuneResponse"], - typing.List["GetFineTuneResponse"], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "jobs": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "GetFineTuneResponse": - return super().__getitem__(i) - __annotations__ = { - "jobs": jobs, - } - jobs: MetaOapg.properties.jobs - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - jobs: typing.Union[ - MetaOapg.properties.jobs, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListFineTunesResponse": - return super().__new__( - cls, - *_args, - jobs=jobs, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_fine_tune_response import GetFineTuneResponse diff --git a/launch/api_client/model/list_fine_tunes_response.py b/launch/api_client/model/list_fine_tunes_response.py index 06570263..7243eb4a 100644 --- a/launch/api_client/model/list_fine_tunes_response.py +++ b/launch/api_client/model/list_fine_tunes_response.py @@ -23,109 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class ListFineTunesResponse(schemas.DictSchema): +class ListFineTunesResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "jobs", } - + class properties: - class jobs(schemas.ListSchema): + + + class jobs( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["GetFineTuneResponse"]: + def items() -> typing.Type['GetFineTuneResponse']: return GetFineTuneResponse - + def __new__( cls, - _arg: typing.Union[typing.Tuple["GetFineTuneResponse"], typing.List["GetFineTuneResponse"]], + _arg: typing.Union[typing.Tuple['GetFineTuneResponse'], typing.List['GetFineTuneResponse']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "jobs": + ) -> 'jobs': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "GetFineTuneResponse": + + def __getitem__(self, i: int) -> 'GetFineTuneResponse': return super().__getitem__(i) - __annotations__ = { "jobs": jobs, } - + jobs: MetaOapg.properties.jobs - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: - ... - + def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["jobs", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - jobs: typing.Union[ - MetaOapg.properties.jobs, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + jobs: typing.Union[MetaOapg.properties.jobs, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListFineTunesResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListFineTunesResponse': return super().__new__( cls, *_args, @@ -134,5 +107,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.get_fine_tune_response import GetFineTuneResponse diff --git a/launch/api_client/model/list_fine_tunes_response.pyi b/launch/api_client/model/list_fine_tunes_response.pyi deleted file mode 100644 index 4300f6b0..00000000 --- a/launch/api_client/model/list_fine_tunes_response.pyi +++ /dev/null @@ -1,121 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - 
-import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListFineTunesResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "jobs", - } - - class properties: - class jobs(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetFineTuneResponse"]: - return GetFineTuneResponse - def __new__( - cls, - _arg: typing.Union[typing.Tuple["GetFineTuneResponse"], typing.List["GetFineTuneResponse"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "jobs": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "GetFineTuneResponse": - return super().__getitem__(i) - __annotations__ = { - "jobs": jobs, - } - jobs: MetaOapg.properties.jobs - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["jobs"]) -> MetaOapg.properties.jobs: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["jobs",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - jobs: typing.Union[ - MetaOapg.properties.jobs, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListFineTunesResponse": - return super().__new__( - cls, - *_args, - jobs=jobs, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_fine_tune_response import GetFineTuneResponse diff --git a/launch/api_client/model/list_llm_model_endpoints_v1_response.py b/launch/api_client/model/list_llm_model_endpoints_v1_response.py index 839c1b1d..726d98fb 100644 --- a/launch/api_client/model/list_llm_model_endpoints_v1_response.py +++ b/launch/api_client/model/list_llm_model_endpoints_v1_response.py @@ -23,111 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class ListLLMModelEndpointsV1Response(schemas.DictSchema): +class ListLLMModelEndpointsV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "model_endpoints", } - + class properties: - class model_endpoints(schemas.ListSchema): + + + class model_endpoints( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["GetLLMModelEndpointV1Response"]: + def items() -> typing.Type['GetLLMModelEndpointV1Response']: return GetLLMModelEndpointV1Response - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple["GetLLMModelEndpointV1Response"], typing.List["GetLLMModelEndpointV1Response"] - ], + _arg: typing.Union[typing.Tuple['GetLLMModelEndpointV1Response'], typing.List['GetLLMModelEndpointV1Response']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_endpoints": + ) -> 'model_endpoints': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "GetLLMModelEndpointV1Response": + + def __getitem__(self, i: int) -> 'GetLLMModelEndpointV1Response': return super().__getitem__(i) - __annotations__ = { "model_endpoints": model_endpoints, } - + model_endpoints: MetaOapg.properties.model_endpoints - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_endpoints: typing.Union[ - MetaOapg.properties.model_endpoints, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + model_endpoints: typing.Union[MetaOapg.properties.model_endpoints, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListLLMModelEndpointsV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListLLMModelEndpointsV1Response': return super().__new__( cls, *_args, @@ -136,7 +107,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.get_llm_model_endpoint_v1_response import ( GetLLMModelEndpointV1Response, ) diff --git a/launch/api_client/model/list_llm_model_endpoints_v1_response.pyi b/launch/api_client/model/list_llm_model_endpoints_v1_response.pyi deleted file mode 100644 index f5756fee..00000000 --- a/launch/api_client/model/list_llm_model_endpoints_v1_response.pyi +++ /dev/null @@ -1,129 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListLLMModelEndpointsV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "model_endpoints", - } - - class properties: - class model_endpoints(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetLLMModelEndpointV1Response"]: - return GetLLMModelEndpointV1Response - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple["GetLLMModelEndpointV1Response"], typing.List["GetLLMModelEndpointV1Response"] - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_endpoints": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "GetLLMModelEndpointV1Response": - return super().__getitem__(i) - __annotations__ = { - "model_endpoints": model_endpoints, - } - model_endpoints: MetaOapg.properties.model_endpoints - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_endpoints"] - ) -> MetaOapg.properties.model_endpoints: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_endpoints"] - ) -> MetaOapg.properties.model_endpoints: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_endpoints: typing.Union[ - MetaOapg.properties.model_endpoints, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListLLMModelEndpointsV1Response": - return super().__new__( - cls, - *_args, - model_endpoints=model_endpoints, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) diff --git a/launch/api_client/model/list_model_bundles_v1_response.py b/launch/api_client/model/list_model_bundles_v1_response.py index 85827680..6e7bb2a8 100644 --- a/launch/api_client/model/list_model_bundles_v1_response.py +++ b/launch/api_client/model/list_model_bundles_v1_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ListModelBundlesV1Response(schemas.DictSchema): +class ListModelBundlesV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -32,102 +34,73 @@ class ListModelBundlesV1Response(schemas.DictSchema): Response object for listing Model Bundles. """ + class MetaOapg: required = { "model_bundles", } - + class properties: - class model_bundles(schemas.ListSchema): + + + class model_bundles( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["ModelBundleV1Response"]: + def items() -> typing.Type['ModelBundleV1Response']: return ModelBundleV1Response - + def __new__( cls, - _arg: typing.Union[typing.Tuple["ModelBundleV1Response"], typing.List["ModelBundleV1Response"]], + _arg: typing.Union[typing.Tuple['ModelBundleV1Response'], typing.List['ModelBundleV1Response']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_bundles": + ) -> 'model_bundles': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "ModelBundleV1Response": + + def __getitem__(self, i: int) -> 'ModelBundleV1Response': return super().__getitem__(i) - __annotations__ = { "model_bundles": model_bundles, } - + model_bundles: MetaOapg.properties.model_bundles - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundles: typing.Union[ - MetaOapg.properties.model_bundles, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + model_bundles: typing.Union[MetaOapg.properties.model_bundles, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListModelBundlesV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListModelBundlesV1Response': return super().__new__( cls, *_args, @@ -136,7 +109,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.model_bundle_v1_response import ( ModelBundleV1Response, ) diff --git a/launch/api_client/model/list_model_bundles_v1_response.pyi 
b/launch/api_client/model/list_model_bundles_v1_response.pyi deleted file mode 100644 index 044323e2..00000000 --- a/launch/api_client/model/list_model_bundles_v1_response.pyi +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListModelBundlesV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for listing Model Bundles. - """ - - class MetaOapg: - required = { - "model_bundles", - } - - class properties: - class model_bundles(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["ModelBundleV1Response"]: - return ModelBundleV1Response - def __new__( - cls, - _arg: typing.Union[typing.Tuple["ModelBundleV1Response"], typing.List["ModelBundleV1Response"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_bundles": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "ModelBundleV1Response": - return super().__getitem__(i) - __annotations__ = { - "model_bundles": model_bundles, - } - model_bundles: MetaOapg.properties.model_bundles - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... 
- @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundles: typing.Union[ - MetaOapg.properties.model_bundles, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListModelBundlesV1Response": - return super().__new__( - cls, - *_args, - model_bundles=model_bundles, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.model_bundle_v1_response import ModelBundleV1Response diff --git a/launch/api_client/model/list_model_bundles_v2_response.py b/launch/api_client/model/list_model_bundles_v2_response.py index 1b92e7e3..0b6ab300 100644 --- a/launch/api_client/model/list_model_bundles_v2_response.py +++ b/launch/api_client/model/list_model_bundles_v2_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ListModelBundlesV2Response(schemas.DictSchema): +class ListModelBundlesV2Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -32,102 +34,73 @@ class ListModelBundlesV2Response(schemas.DictSchema): Response object for listing Model Bundles. """ + class MetaOapg: required = { "model_bundles", } - + class properties: - class model_bundles(schemas.ListSchema): + + + class model_bundles( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["ModelBundleV2Response"]: + def items() -> typing.Type['ModelBundleV2Response']: return ModelBundleV2Response - + def __new__( cls, - _arg: typing.Union[typing.Tuple["ModelBundleV2Response"], typing.List["ModelBundleV2Response"]], + _arg: typing.Union[typing.Tuple['ModelBundleV2Response'], typing.List['ModelBundleV2Response']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_bundles": + ) -> 'model_bundles': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "ModelBundleV2Response": + + def __getitem__(self, i: int) -> 'ModelBundleV2Response': return super().__getitem__(i) - __annotations__ = { "model_bundles": model_bundles, } - + model_bundles: MetaOapg.properties.model_bundles - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundles", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundles: typing.Union[ - MetaOapg.properties.model_bundles, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + model_bundles: typing.Union[MetaOapg.properties.model_bundles, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListModelBundlesV2Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListModelBundlesV2Response': return super().__new__( cls, *_args, @@ -136,7 +109,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.model_bundle_v2_response import ( ModelBundleV2Response, ) diff --git a/launch/api_client/model/list_model_bundles_v2_response.pyi 
b/launch/api_client/model/list_model_bundles_v2_response.pyi deleted file mode 100644 index ac37d268..00000000 --- a/launch/api_client/model/list_model_bundles_v2_response.pyi +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListModelBundlesV2Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for listing Model Bundles. - """ - - class MetaOapg: - required = { - "model_bundles", - } - - class properties: - class model_bundles(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["ModelBundleV2Response"]: - return ModelBundleV2Response - def __new__( - cls, - _arg: typing.Union[typing.Tuple["ModelBundleV2Response"], typing.List["ModelBundleV2Response"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_bundles": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "ModelBundleV2Response": - return super().__getitem__(i) - __annotations__ = { - "model_bundles": model_bundles, - } - model_bundles: MetaOapg.properties.model_bundles - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... 
- @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_bundles"]) -> MetaOapg.properties.model_bundles: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_bundles",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_bundles: typing.Union[ - MetaOapg.properties.model_bundles, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListModelBundlesV2Response": - return super().__new__( - cls, - *_args, - model_bundles=model_bundles, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.model_bundle_v2_response import ModelBundleV2Response diff --git a/launch/api_client/model/list_model_endpoints_v1_response.py b/launch/api_client/model/list_model_endpoints_v1_response.py index 7368c7e9..2ddfacd1 100644 --- a/launch/api_client/model/list_model_endpoints_v1_response.py +++ b/launch/api_client/model/list_model_endpoints_v1_response.py @@ -23,111 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class ListModelEndpointsV1Response(schemas.DictSchema): +class ListModelEndpointsV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "model_endpoints", } - + class properties: - class model_endpoints(schemas.ListSchema): + + + class model_endpoints( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["GetModelEndpointV1Response"]: + def items() -> typing.Type['GetModelEndpointV1Response']: return GetModelEndpointV1Response - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple["GetModelEndpointV1Response"], typing.List["GetModelEndpointV1Response"] - ], + _arg: typing.Union[typing.Tuple['GetModelEndpointV1Response'], typing.List['GetModelEndpointV1Response']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_endpoints": + ) -> 'model_endpoints': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "GetModelEndpointV1Response": + + def __getitem__(self, i: int) -> 'GetModelEndpointV1Response': return super().__getitem__(i) - __annotations__ = { "model_endpoints": model_endpoints, } - + model_endpoints: MetaOapg.properties.model_endpoints - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["model_endpoints"]) -> MetaOapg.properties.model_endpoints: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_endpoints", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_endpoints: typing.Union[ - MetaOapg.properties.model_endpoints, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + model_endpoints: typing.Union[MetaOapg.properties.model_endpoints, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListModelEndpointsV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListModelEndpointsV1Response': return super().__new__( cls, *_args, @@ -136,7 +107,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.get_model_endpoint_v1_response import ( GetModelEndpointV1Response, ) diff --git a/launch/api_client/model/list_model_endpoints_v1_response.pyi b/launch/api_client/model/list_model_endpoints_v1_response.pyi deleted file mode 100644 index e2340ff3..00000000 --- a/launch/api_client/model/list_model_endpoints_v1_response.pyi +++ /dev/null @@ -1,129 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListModelEndpointsV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "model_endpoints", - } - - class properties: - class model_endpoints(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetModelEndpointV1Response"]: - return GetModelEndpointV1Response - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple["GetModelEndpointV1Response"], typing.List["GetModelEndpointV1Response"] - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_endpoints": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "GetModelEndpointV1Response": - return super().__getitem__(i) - __annotations__ = { - "model_endpoints": model_endpoints, - } - model_endpoints: MetaOapg.properties.model_endpoints - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_endpoints"] - ) -> MetaOapg.properties.model_endpoints: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_endpoints"] - ) -> MetaOapg.properties.model_endpoints: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["model_endpoints",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_endpoints: typing.Union[ - MetaOapg.properties.model_endpoints, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListModelEndpointsV1Response": - return super().__new__( - cls, - *_args, - model_endpoints=model_endpoints, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) diff --git a/launch/api_client/model/list_triggers_v1_response.py b/launch/api_client/model/list_triggers_v1_response.py index c9638646..7b9d6456 100644 --- a/launch/api_client/model/list_triggers_v1_response.py +++ b/launch/api_client/model/list_triggers_v1_response.py @@ -23,109 +23,82 @@ from launch.api_client import schemas # noqa: F401 -class ListTriggersV1Response(schemas.DictSchema): +class ListTriggersV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "triggers", } - + class properties: - class triggers(schemas.ListSchema): + + + class triggers( + schemas.ListSchema + ): + + class MetaOapg: + @staticmethod - def items() -> typing.Type["GetTriggerV1Response"]: + def items() -> typing.Type['GetTriggerV1Response']: return GetTriggerV1Response - + def __new__( cls, - _arg: typing.Union[typing.Tuple["GetTriggerV1Response"], typing.List["GetTriggerV1Response"]], + _arg: typing.Union[typing.Tuple['GetTriggerV1Response'], typing.List['GetTriggerV1Response']], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "triggers": + ) -> 'triggers': return super().__new__( cls, _arg, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> "GetTriggerV1Response": + + def __getitem__(self, i: int) -> 'GetTriggerV1Response': return super().__getitem__(i) - __annotations__ = { "triggers": triggers, } - + triggers: MetaOapg.properties.triggers - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: - ... - + def __getitem__(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["triggers",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["triggers", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["triggers",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["triggers", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - triggers: typing.Union[ - MetaOapg.properties.triggers, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + triggers: typing.Union[MetaOapg.properties.triggers, list, tuple, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListTriggersV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ListTriggersV1Response': return super().__new__( cls, *_args, @@ -134,7 +107,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.get_trigger_v1_response import ( GetTriggerV1Response, ) diff --git a/launch/api_client/model/list_triggers_v1_response.pyi b/launch/api_client/model/list_triggers_v1_response.pyi deleted file mode 100644 index e7b3892f..00000000 --- a/launch/api_client/model/list_triggers_v1_response.pyi +++ /dev/null @@ -1,121 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import 
date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ListTriggersV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "triggers", - } - - class properties: - class triggers(schemas.ListSchema): - class MetaOapg: - @staticmethod - def items() -> typing.Type["GetTriggerV1Response"]: - return GetTriggerV1Response - def __new__( - cls, - _arg: typing.Union[typing.Tuple["GetTriggerV1Response"], typing.List["GetTriggerV1Response"]], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "triggers": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> "GetTriggerV1Response": - return super().__getitem__(i) - __annotations__ = { - "triggers": triggers, - } - triggers: MetaOapg.properties.triggers - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["triggers",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triggers"]) -> MetaOapg.properties.triggers: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["triggers",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - triggers: typing.Union[ - MetaOapg.properties.triggers, - list, - tuple, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ListTriggersV1Response": - return super().__new__( - cls, - *_args, - triggers=triggers, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.get_trigger_v1_response import GetTriggerV1Response diff --git a/launch/api_client/model/llm_fine_tune_event.py b/launch/api_client/model/llm_fine_tune_event.py index d730b0bf..954606a3 100644 --- a/launch/api_client/model/llm_fine_tune_event.py +++ b/launch/api_client/model/llm_fine_tune_event.py @@ -23,128 +23,96 @@ from launch.api_client import schemas # noqa: F401 -class LLMFineTuneEvent(schemas.DictSchema): +class LLMFineTuneEvent( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "level", "message", } - + class properties: - level = schemas.StrSchema message = schemas.StrSchema - timestamp = schemas.NumberSchema + level = schemas.StrSchema + + + class timestamp( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'timestamp': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "level": level, "message": message, + "level": level, "timestamp": timestamp, } - + level: MetaOapg.properties.level message: MetaOapg.properties.message - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: - ... - + def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: - ... - + def __getitem__(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: - ... - + def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "level", - "message", - "timestamp", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["message", "level", "timestamp", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["timestamp"] - ) -> typing.Union[MetaOapg.properties.timestamp, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["timestamp"]) -> typing.Union[MetaOapg.properties.timestamp, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "level", - "message", - "timestamp", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["message", "level", "timestamp", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - level: typing.Union[ - MetaOapg.properties.level, - str, - ], - message: typing.Union[ - MetaOapg.properties.message, - str, - ], - timestamp: typing.Union[ - MetaOapg.properties.timestamp, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + level: typing.Union[MetaOapg.properties.level, str, ], + message: typing.Union[MetaOapg.properties.message, str, ], + timestamp: typing.Union[MetaOapg.properties.timestamp, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "LLMFineTuneEvent": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'LLMFineTuneEvent': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/llm_fine_tune_event.pyi b/launch/api_client/model/llm_fine_tune_event.pyi deleted file mode 100644 index 2a2998b8..00000000 --- a/launch/api_client/model/llm_fine_tune_event.pyi +++ /dev/null @@ -1,135 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, 
datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class LLMFineTuneEvent(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "level", - "message", - } - - class properties: - level = schemas.StrSchema - message = schemas.StrSchema - timestamp = schemas.NumberSchema - __annotations__ = { - "level": level, - "message": message, - "timestamp": timestamp, - } - level: MetaOapg.properties.level - message: MetaOapg.properties.message - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "level", - "message", - "timestamp", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["level"]) -> MetaOapg.properties.level: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["message"]) -> MetaOapg.properties.message: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["timestamp"] - ) -> typing.Union[MetaOapg.properties.timestamp, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "level", - "message", - "timestamp", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - level: typing.Union[ - MetaOapg.properties.level, - str, - ], - message: typing.Union[ - MetaOapg.properties.message, - str, - ], - timestamp: typing.Union[ - MetaOapg.properties.timestamp, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "LLMFineTuneEvent": - return super().__new__( - cls, - *_args, - level=level, - message=message, - timestamp=timestamp, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/llm_inference_framework.py b/launch/api_client/model/llm_inference_framework.py index 82d4fabd..cd6c0c75 100644 --- a/launch/api_client/model/llm_inference_framework.py +++ b/launch/api_client/model/llm_inference_framework.py @@ -23,15 +23,17 @@ from launch.api_client import schemas # noqa: F401 -class LLMInferenceFramework(schemas.EnumBase, schemas.StrSchema): +class LLMInferenceFramework( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. 
""" + class MetaOapg: enum_value_to_name = { "deepspeed": "DEEPSPEED", @@ -39,24 +41,29 @@ class MetaOapg: "vllm": "VLLM", "lightllm": "LIGHTLLM", "tensorrt_llm": "TENSORRT_LLM", + "sglang": "SGLANG", } - + @schemas.classproperty def DEEPSPEED(cls): return cls("deepspeed") - + @schemas.classproperty def TEXT_GENERATION_INFERENCE(cls): return cls("text_generation_inference") - + @schemas.classproperty def VLLM(cls): return cls("vllm") - + @schemas.classproperty def LIGHTLLM(cls): return cls("lightllm") - + @schemas.classproperty def TENSORRT_LLM(cls): return cls("tensorrt_llm") + + @schemas.classproperty + def SGLANG(cls): + return cls("sglang") diff --git a/launch/api_client/model/llm_inference_framework.pyi b/launch/api_client/model/llm_inference_framework.pyi deleted file mode 100644 index 1ca1601a..00000000 --- a/launch/api_client/model/llm_inference_framework.pyi +++ /dev/null @@ -1,47 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class LLMInferenceFramework(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An enumeration. 
- """ - - @schemas.classproperty - def DEEPSPEED(cls): - return cls("deepspeed") - @schemas.classproperty - def TEXT_GENERATION_INFERENCE(cls): - return cls("text_generation_inference") - @schemas.classproperty - def VLLM(cls): - return cls("vllm") - @schemas.classproperty - def LIGHTLLM(cls): - return cls("lightllm") - @schemas.classproperty - def TENSORRT_LLM(cls): - return cls("tensorrt_llm") diff --git a/launch/api_client/model/llm_source.py b/launch/api_client/model/llm_source.py index 7b1ed8c2..d2fa2586 100644 --- a/launch/api_client/model/llm_source.py +++ b/launch/api_client/model/llm_source.py @@ -23,20 +23,22 @@ from launch.api_client import schemas # noqa: F401 -class LLMSource(schemas.EnumBase, schemas.StrSchema): +class LLMSource( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. """ + class MetaOapg: enum_value_to_name = { "hugging_face": "HUGGING_FACE", } - + @schemas.classproperty def HUGGING_FACE(cls): return cls("hugging_face") diff --git a/launch/api_client/model/logprobs.py b/launch/api_client/model/logprobs.py new file mode 100644 index 00000000..ba8b60ae --- /dev/null +++ b/launch/api_client/model/logprobs.py @@ -0,0 +1,151 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Logprobs( + schemas.DictSchema +): + """NOTE: This class is auto generated by 
OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "refusal", + "content", + } + + class properties: + + + class content( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionTokenLogprob']: + return ChatCompletionTokenLogprob + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class refusal( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['ChatCompletionTokenLogprob']: + return ChatCompletionTokenLogprob + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'refusal': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "content": content, + "refusal": refusal, + } + + refusal: MetaOapg.properties.refusal + content: MetaOapg.properties.content + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["content", "refusal", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["refusal"]) -> MetaOapg.properties.refusal: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["content", "refusal", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + refusal: typing.Union[MetaOapg.properties.refusal, list, tuple, None, ], + content: typing.Union[MetaOapg.properties.content, list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Logprobs': + return super().__new__( + cls, + *_args, + refusal=refusal, + content=content, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.chat_completion_token_logprob import ( + ChatCompletionTokenLogprob, +) diff --git a/launch/api_client/model/logprobs2.py b/launch/api_client/model/logprobs2.py new file mode 100644 index 00000000..95390954 --- /dev/null +++ b/launch/api_client/model/logprobs2.py @@ -0,0 +1,228 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Logprobs2( + schemas.DictSchema +): + """NOTE: This class is auto generated 
by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + + + class text_offset( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'text_offset': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class token_logprobs( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.NumberSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'token_logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokens( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class top_logprobs( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + + + class items( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.NumberSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, decimal.Decimal, int, float, ], + ) -> 'items': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'top_logprobs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "text_offset": text_offset, + "token_logprobs": token_logprobs, + "tokens": tokens, + "top_logprobs": top_logprobs, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["text_offset"]) -> MetaOapg.properties.text_offset: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["token_logprobs"]) -> MetaOapg.properties.token_logprobs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokens"]) -> MetaOapg.properties.tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["top_logprobs"]) -> MetaOapg.properties.top_logprobs: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["text_offset", "token_logprobs", "tokens", "top_logprobs", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["text_offset"]) -> typing.Union[MetaOapg.properties.text_offset, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["token_logprobs"]) -> typing.Union[MetaOapg.properties.token_logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokens"]) -> typing.Union[MetaOapg.properties.tokens, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["top_logprobs"]) -> typing.Union[MetaOapg.properties.top_logprobs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["text_offset", "token_logprobs", "tokens", "top_logprobs", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + text_offset: typing.Union[MetaOapg.properties.text_offset, list, tuple, None, schemas.Unset] = schemas.unset, + token_logprobs: typing.Union[MetaOapg.properties.token_logprobs, list, tuple, None, schemas.Unset] = schemas.unset, + tokens: typing.Union[MetaOapg.properties.tokens, list, tuple, None, schemas.Unset] = schemas.unset, + top_logprobs: typing.Union[MetaOapg.properties.top_logprobs, list, tuple, None, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'Logprobs2': + return super().__new__( + cls, + *_args, + text_offset=text_offset, + token_logprobs=token_logprobs, + tokens=tokens, + top_logprobs=top_logprobs, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/metadata.py b/launch/api_client/model/metadata.py new file mode 100644 index 00000000..ea645e23 --- /dev/null +++ b/launch/api_client/model/metadata.py @@ -0,0 +1,61 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import 
re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'Metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/model_bundle_environment_params.py b/launch/api_client/model/model_bundle_environment_params.py index 52633215..a182d03e 100644 --- a/launch/api_client/model/model_bundle_environment_params.py +++ b/launch/api_client/model/model_bundle_environment_params.py @@ -23,173 +23,187 @@ from launch.api_client import schemas # noqa: F401 -class ModelBundleEnvironmentParams(schemas.DictSchema): +class ModelBundleEnvironmentParams( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech + Ref: https://openapi-generator.tech - Do not edit the class manually. + Do not edit the class manually. - This is the entity-layer class for the Model Bundle environment parameters. 
Being an - entity-layer class, it should be a plain data object. + This is the entity-layer class for the Model Bundle environment parameters. Being an +entity-layer class, it should be a plain data object. """ + class MetaOapg: required = { "framework_type", } - + class properties: + @staticmethod - def framework_type() -> typing.Type["ModelBundleFrameworkType"]: + def framework_type() -> typing.Type['ModelBundleFrameworkType']: return ModelBundleFrameworkType - - ecr_repo = schemas.StrSchema - image_tag = schemas.StrSchema - pytorch_image_tag = schemas.StrSchema - tensorflow_version = schemas.StrSchema + + + class pytorch_image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'pytorch_image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tensorflow_version( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tensorflow_version': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ecr_repo( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ecr_repo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) 
__annotations__ = { "framework_type": framework_type, - "ecr_repo": ecr_repo, - "image_tag": image_tag, "pytorch_image_tag": pytorch_image_tag, "tensorflow_version": tensorflow_version, + "ecr_repo": ecr_repo, + "image_tag": image_tag, } - - framework_type: "ModelBundleFrameworkType" - + + framework_type: 'ModelBundleFrameworkType' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> "ModelBundleFrameworkType": - ... - + def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> 'ModelBundleFrameworkType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ecr_repo"]) -> MetaOapg.properties.ecr_repo: - ... - + def __getitem__(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> MetaOapg.properties.pytorch_image_tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["tensorflow_version"]) -> MetaOapg.properties.tensorflow_version: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> MetaOapg.properties.pytorch_image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["ecr_repo"]) -> MetaOapg.properties.ecr_repo: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> MetaOapg.properties.tensorflow_version: - ... - + def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "ecr_repo", - "image_tag", - "pytorch_image_tag", - "tensorflow_version", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", "tensorflow_version", "ecr_repo", "image_tag", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> "ModelBundleFrameworkType": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> 'ModelBundleFrameworkType': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["ecr_repo"] - ) -> typing.Union[MetaOapg.properties.ecr_repo, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> typing.Union[MetaOapg.properties.pytorch_image_tag, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_tag"] - ) -> typing.Union[MetaOapg.properties.image_tag, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["tensorflow_version"]) -> typing.Union[MetaOapg.properties.tensorflow_version, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> typing.Union[MetaOapg.properties.pytorch_image_tag, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["ecr_repo"]) -> typing.Union[MetaOapg.properties.ecr_repo, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> typing.Union[MetaOapg.properties.tensorflow_version, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["image_tag"]) -> typing.Union[MetaOapg.properties.image_tag, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "ecr_repo", - "image_tag", - "pytorch_image_tag", - "tensorflow_version", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", "tensorflow_version", "ecr_repo", "image_tag", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - framework_type: "ModelBundleFrameworkType", - ecr_repo: typing.Union[MetaOapg.properties.ecr_repo, str, schemas.Unset] = schemas.unset, - image_tag: typing.Union[MetaOapg.properties.image_tag, str, schemas.Unset] = schemas.unset, - pytorch_image_tag: typing.Union[MetaOapg.properties.pytorch_image_tag, str, schemas.Unset] = schemas.unset, - tensorflow_version: typing.Union[MetaOapg.properties.tensorflow_version, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + framework_type: 'ModelBundleFrameworkType', + pytorch_image_tag: typing.Union[MetaOapg.properties.pytorch_image_tag, None, str, schemas.Unset] = schemas.unset, + tensorflow_version: typing.Union[MetaOapg.properties.tensorflow_version, None, str, schemas.Unset] = schemas.unset, + ecr_repo: typing.Union[MetaOapg.properties.ecr_repo, None, str, schemas.Unset] = schemas.unset, + image_tag: typing.Union[MetaOapg.properties.image_tag, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelBundleEnvironmentParams": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelBundleEnvironmentParams': return super().__new__( cls, *_args, framework_type=framework_type, - ecr_repo=ecr_repo, - image_tag=image_tag, pytorch_image_tag=pytorch_image_tag, tensorflow_version=tensorflow_version, + ecr_repo=ecr_repo, + image_tag=image_tag, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.model_bundle_framework_type import ( ModelBundleFrameworkType, ) diff --git a/launch/api_client/model/model_bundle_environment_params.pyi b/launch/api_client/model/model_bundle_environment_params.pyi deleted file mode 100644 index 54ace726..00000000 --- a/launch/api_client/model/model_bundle_environment_params.pyi +++ /dev/null @@ -1,164 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelBundleEnvironmentParams(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle environment parameters. Being an - entity-layer class, it should be a plain data object. 
- """ - - class MetaOapg: - required = { - "framework_type", - } - - class properties: - @staticmethod - def framework_type() -> typing.Type["ModelBundleFrameworkType"]: - return ModelBundleFrameworkType - ecr_repo = schemas.StrSchema - image_tag = schemas.StrSchema - pytorch_image_tag = schemas.StrSchema - tensorflow_version = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "ecr_repo": ecr_repo, - "image_tag": image_tag, - "pytorch_image_tag": pytorch_image_tag, - "tensorflow_version": tensorflow_version, - } - framework_type: "ModelBundleFrameworkType" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> "ModelBundleFrameworkType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["ecr_repo"]) -> MetaOapg.properties.ecr_repo: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["image_tag"]) -> MetaOapg.properties.image_tag: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> MetaOapg.properties.pytorch_image_tag: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> MetaOapg.properties.tensorflow_version: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "ecr_repo", - "image_tag", - "pytorch_image_tag", - "tensorflow_version", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> "ModelBundleFrameworkType": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["ecr_repo"] - ) -> typing.Union[MetaOapg.properties.ecr_repo, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["image_tag"] - ) -> typing.Union[MetaOapg.properties.image_tag, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> typing.Union[MetaOapg.properties.pytorch_image_tag, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> typing.Union[MetaOapg.properties.tensorflow_version, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "ecr_repo", - "image_tag", - "pytorch_image_tag", - "tensorflow_version", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - framework_type: "ModelBundleFrameworkType", - ecr_repo: typing.Union[MetaOapg.properties.ecr_repo, str, schemas.Unset] = schemas.unset, - image_tag: typing.Union[MetaOapg.properties.image_tag, str, schemas.Unset] = schemas.unset, - pytorch_image_tag: typing.Union[MetaOapg.properties.pytorch_image_tag, str, schemas.Unset] = schemas.unset, - tensorflow_version: typing.Union[MetaOapg.properties.tensorflow_version, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelBundleEnvironmentParams": - return super().__new__( - cls, - *_args, - framework_type=framework_type, - ecr_repo=ecr_repo, - image_tag=image_tag, - pytorch_image_tag=pytorch_image_tag, - tensorflow_version=tensorflow_version, - _configuration=_configuration, - **kwargs, - ) - -from 
launch_client.model.model_bundle_framework_type import ( - ModelBundleFrameworkType, -) diff --git a/launch/api_client/model/model_bundle_framework_type.py b/launch/api_client/model/model_bundle_framework_type.py index cbbdb61d..12521da8 100644 --- a/launch/api_client/model/model_bundle_framework_type.py +++ b/launch/api_client/model/model_bundle_framework_type.py @@ -23,7 +23,10 @@ from launch.api_client import schemas # noqa: F401 -class ModelBundleFrameworkType(schemas.EnumBase, schemas.StrSchema): +class ModelBundleFrameworkType( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,21 +35,22 @@ class ModelBundleFrameworkType(schemas.EnumBase, schemas.StrSchema): The canonical list of possible machine learning frameworks of Model Bundles. """ + class MetaOapg: enum_value_to_name = { "pytorch": "PYTORCH", "tensorflow": "TENSORFLOW", "custom_base_image": "CUSTOM_BASE_IMAGE", } - + @schemas.classproperty def PYTORCH(cls): return cls("pytorch") - + @schemas.classproperty def TENSORFLOW(cls): return cls("tensorflow") - + @schemas.classproperty def CUSTOM_BASE_IMAGE(cls): return cls("custom_base_image") diff --git a/launch/api_client/model/model_bundle_order_by.py b/launch/api_client/model/model_bundle_order_by.py index 987734ac..42a5c01c 100644 --- a/launch/api_client/model/model_bundle_order_by.py +++ b/launch/api_client/model/model_bundle_order_by.py @@ -23,7 +23,10 @@ from launch.api_client import schemas # noqa: F401 -class ModelBundleOrderBy(schemas.EnumBase, schemas.StrSchema): +class ModelBundleOrderBy( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,16 +35,17 @@ class ModelBundleOrderBy(schemas.EnumBase, schemas.StrSchema): The canonical list of possible orderings of Model Bundles. 
""" + class MetaOapg: enum_value_to_name = { "newest": "NEWEST", "oldest": "OLDEST", } - + @schemas.classproperty def NEWEST(cls): return cls("newest") - + @schemas.classproperty def OLDEST(cls): return cls("oldest") diff --git a/launch/api_client/model/model_bundle_order_by.pyi b/launch/api_client/model/model_bundle_order_by.pyi deleted file mode 100644 index caa21cef..00000000 --- a/launch/api_client/model/model_bundle_order_by.pyi +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelBundleOrderBy(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The canonical list of possible orderings of Model Bundles. 
- """ - - @schemas.classproperty - def NEWEST(cls): - return cls("newest") - @schemas.classproperty - def OLDEST(cls): - return cls("oldest") diff --git a/launch/api_client/model/model_bundle_packaging_type.py b/launch/api_client/model/model_bundle_packaging_type.py index e97563f6..a67cfd36 100644 --- a/launch/api_client/model/model_bundle_packaging_type.py +++ b/launch/api_client/model/model_bundle_packaging_type.py @@ -23,32 +23,36 @@ from launch.api_client import schemas # noqa: F401 -class ModelBundlePackagingType(schemas.EnumBase, schemas.StrSchema): +class ModelBundlePackagingType( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech + Ref: https://openapi-generator.tech - Do not edit the class manually. + Do not edit the class manually. - The canonical list of possible packaging types for Model Bundles. + The canonical list of possible packaging types for Model Bundles. - These values broadly determine how the model endpoint will obtain its code & dependencies. +These values broadly determine how the model endpoint will obtain its code & dependencies. 
""" + class MetaOapg: enum_value_to_name = { "cloudpickle": "CLOUDPICKLE", "zip": "ZIP", "lira": "LIRA", } - + @schemas.classproperty def CLOUDPICKLE(cls): return cls("cloudpickle") - + @schemas.classproperty def ZIP(cls): return cls("zip") - + @schemas.classproperty def LIRA(cls): return cls("lira") diff --git a/launch/api_client/model/model_bundle_packaging_type.pyi b/launch/api_client/model/model_bundle_packaging_type.pyi deleted file mode 100644 index 0ff122fb..00000000 --- a/launch/api_client/model/model_bundle_packaging_type.pyi +++ /dev/null @@ -1,43 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelBundlePackagingType(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - The canonical list of possible packaging types for Model Bundles. - - These values broadly determine how the model endpoint will obtain its code & dependencies. 
- """ - - @schemas.classproperty - def CLOUDPICKLE(cls): - return cls("cloudpickle") - @schemas.classproperty - def ZIP(cls): - return cls("zip") - @schemas.classproperty - def LIRA(cls): - return cls("lira") diff --git a/launch/api_client/model/model_bundle_v1_response.py b/launch/api_client/model/model_bundle_v1_response.py index 90bdef95..561665b8 100644 --- a/launch/api_client/model/model_bundle_v1_response.py +++ b/launch/api_client/model/model_bundle_v1_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ModelBundleV1Response(schemas.DictSchema): +class ModelBundleV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class ModelBundleV1Response(schemas.DictSchema): Response object for a single Model Bundle. """ + class MetaOapg: required = { "metadata", @@ -44,321 +47,270 @@ class MetaOapg: "id", "env_params", } - + class properties: - created_at = schemas.DateTimeSchema - - @staticmethod - def env_params() -> typing.Type["ModelBundleEnvironmentParams"]: - return ModelBundleEnvironmentParams - id = schemas.StrSchema + name = schemas.StrSchema location = schemas.StrSchema - metadata = schemas.DictSchema - - class model_artifact_ids(schemas.ListSchema): + + + class requirements( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_artifact_ids": + ) -> 'requirements': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - name = 
schemas.StrSchema - + + @staticmethod + def env_params() -> typing.Type['ModelBundleEnvironmentParams']: + return ModelBundleEnvironmentParams + @staticmethod - def packaging_type() -> typing.Type["ModelBundlePackagingType"]: + def packaging_type() -> typing.Type['ModelBundlePackagingType']: return ModelBundlePackagingType - - class requirements(schemas.ListSchema): + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + created_at = schemas.DateTimeSchema + + + class model_artifact_ids( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": + ) -> 'model_artifact_ids': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - app_config = schemas.DictSchema - 
schema_location = schemas.StrSchema + + + class app_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'app_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class schema_location( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'schema_location': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "created_at": created_at, - "env_params": env_params, "id": id, + "name": name, "location": location, + "requirements": requirements, + "env_params": env_params, + "packaging_type": packaging_type, "metadata": metadata, + "created_at": created_at, "model_artifact_ids": model_artifact_ids, - "name": name, - "packaging_type": packaging_type, - "requirements": requirements, "app_config": app_config, "schema_location": schema_location, } - + metadata: MetaOapg.properties.metadata requirements: MetaOapg.properties.requirements model_artifact_ids: MetaOapg.properties.model_artifact_ids - packaging_type: "ModelBundlePackagingType" + 
packaging_type: 'ModelBundlePackagingType' name: MetaOapg.properties.name created_at: MetaOapg.properties.created_at location: MetaOapg.properties.location id: MetaOapg.properties.id - env_params: "ModelBundleEnvironmentParams" - + env_params: 'ModelBundleEnvironmentParams' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: - ... - + def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": - ... - + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: - ... - + def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "env_params", - "id", - "location", - "metadata", - "model_artifact_ids", - "name", - "packaging_type", - "requirements", - "app_config", - "schema_location", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "location", "requirements", "env_params", "packaging_type", "metadata", "created_at", "model_artifact_ids", "app_config", "schema_location", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> 'ModelBundleEnvironmentParams': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> 'ModelBundlePackagingType': ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "env_params", - "id", - "location", - "metadata", - "model_artifact_ids", - "name", - "packaging_type", - "requirements", - "app_config", - "schema_location", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "location", "requirements", "env_params", "packaging_type", "metadata", "created_at", "model_artifact_ids", "app_config", "schema_location", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - model_artifact_ids: typing.Union[ - MetaOapg.properties.model_artifact_ids, - list, - tuple, - ], - packaging_type: "ModelBundlePackagingType", - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - env_params: "ModelBundleEnvironmentParams", - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - schema_location: typing.Union[MetaOapg.properties.schema_location, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], + model_artifact_ids: typing.Union[MetaOapg.properties.model_artifact_ids, list, tuple, ], + packaging_type: 'ModelBundlePackagingType', + name: typing.Union[MetaOapg.properties.name, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], + location: typing.Union[MetaOapg.properties.location, str, ], + id: typing.Union[MetaOapg.properties.id, str, ], + env_params: 'ModelBundleEnvironmentParams', + app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = 
schemas.unset, + schema_location: typing.Union[MetaOapg.properties.schema_location, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelBundleV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelBundleV1Response': return super().__new__( cls, *_args, @@ -377,7 +329,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.model_bundle_environment_params import ( ModelBundleEnvironmentParams, ) diff --git a/launch/api_client/model/model_bundle_v1_response.pyi b/launch/api_client/model/model_bundle_v1_response.pyi deleted file mode 100644 index 44b97398..00000000 --- a/launch/api_client/model/model_bundle_v1_response.pyi +++ /dev/null @@ -1,327 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelBundleV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a single Model Bundle. 
- """ - - class MetaOapg: - required = { - "metadata", - "requirements", - "model_artifact_ids", - "packaging_type", - "name", - "created_at", - "location", - "id", - "env_params", - } - - class properties: - created_at = schemas.DateTimeSchema - - @staticmethod - def env_params() -> typing.Type["ModelBundleEnvironmentParams"]: - return ModelBundleEnvironmentParams - id = schemas.StrSchema - location = schemas.StrSchema - metadata = schemas.DictSchema - - class model_artifact_ids(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_artifact_ids": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - name = schemas.StrSchema - - @staticmethod - def packaging_type() -> typing.Type["ModelBundlePackagingType"]: - return ModelBundlePackagingType - - class requirements(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - app_config = schemas.DictSchema - schema_location = schemas.StrSchema - __annotations__ = { - "created_at": created_at, - "env_params": env_params, - "id": id, - "location": location, - "metadata": metadata, - "model_artifact_ids": model_artifact_ids, - "name": name, - "packaging_type": packaging_type, - "requirements": requirements, - 
"app_config": app_config, - "schema_location": schema_location, - } - metadata: MetaOapg.properties.metadata - requirements: MetaOapg.properties.requirements - model_artifact_ids: MetaOapg.properties.model_artifact_ids - packaging_type: "ModelBundlePackagingType" - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - location: MetaOapg.properties.location - id: MetaOapg.properties.id - env_params: "ModelBundleEnvironmentParams" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["schema_location"] - ) -> MetaOapg.properties.schema_location: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "env_params", - "id", - "location", - "metadata", - "model_artifact_ids", - "name", - "packaging_type", - "requirements", - "app_config", - "schema_location", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["env_params"]) -> "ModelBundleEnvironmentParams": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["packaging_type"]) -> "ModelBundlePackagingType": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "env_params", - "id", - "location", - "metadata", - "model_artifact_ids", - "name", - "packaging_type", - "requirements", - "app_config", - "schema_location", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - model_artifact_ids: typing.Union[ - MetaOapg.properties.model_artifact_ids, - list, - tuple, - ], - packaging_type: "ModelBundlePackagingType", - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - env_params: "ModelBundleEnvironmentParams", - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - schema_location: typing.Union[MetaOapg.properties.schema_location, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelBundleV1Response": - return super().__new__( - cls, - *_args, - metadata=metadata, - requirements=requirements, - model_artifact_ids=model_artifact_ids, - packaging_type=packaging_type, - name=name, - created_at=created_at, - location=location, - id=id, - env_params=env_params, - app_config=app_config, - schema_location=schema_location, - _configuration=_configuration, - **kwargs, - ) - -from 
launch_client.model.model_bundle_environment_params import ( - ModelBundleEnvironmentParams, -) -from launch_client.model.model_bundle_packaging_type import ( - ModelBundlePackagingType, -) diff --git a/launch/api_client/model/model_bundle_v2_response.py b/launch/api_client/model/model_bundle_v2_response.py index bd1bec4b..3d23be2d 100644 --- a/launch/api_client/model/model_bundle_v2_response.py +++ b/launch/api_client/model/model_bundle_v2_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ModelBundleV2Response(schemas.DictSchema): +class ModelBundleV2Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class ModelBundleV2Response(schemas.DictSchema): Response object for a single Model Bundle. """ + class MetaOapg: required = { "flavor", @@ -41,14 +44,72 @@ class MetaOapg: "created_at", "id", } - + class properties: + id = schemas.StrSchema + name = schemas.StrSchema + + + class metadata( + schemas.DictSchema + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) created_at = schemas.DateTimeSchema - + + + class model_artifact_ids( + schemas.ListSchema + ): + + + class MetaOapg: 
+ items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model_artifact_ids': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + class flavor( schemas.ComposedSchema, ): + + class MetaOapg: + @classmethod @functools.lru_cache() def one_of(cls): @@ -66,277 +127,127 @@ def one_of(cls): StreamingEnhancedRunnableImageFlavor, TritonEnhancedRunnableImageFlavor, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "flavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'flavor': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - id = schemas.StrSchema - metadata = schemas.DictSchema - - class model_artifact_ids(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - + + + class schema_location( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - 
], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[None, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_artifact_ids": + ) -> 'schema_location': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - name = schemas.StrSchema - schema_location = schemas.StrSchema __annotations__ = { - "created_at": created_at, - "flavor": flavor, "id": id, + "name": name, "metadata": metadata, + "created_at": created_at, "model_artifact_ids": model_artifact_ids, - "name": name, + "flavor": flavor, "schema_location": schema_location, } - + flavor: MetaOapg.properties.flavor metadata: MetaOapg.properties.metadata model_artifact_ids: MetaOapg.properties.model_artifact_ids name: MetaOapg.properties.name created_at: MetaOapg.properties.created_at id: MetaOapg.properties.id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... 
+ @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: - ... - + def __getitem__(self, name: typing_extensions.Literal["schema_location"]) -> MetaOapg.properties.schema_location: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "flavor", - "id", - "metadata", - "model_artifact_ids", - "name", - "schema_location", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", "name", "metadata", "created_at", "model_artifact_ids", "flavor", "schema_location", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_artifact_ids"]) -> MetaOapg.properties.model_artifact_ids: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["schema_location"]) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "flavor", - "id", - "metadata", - "model_artifact_ids", - "name", - "schema_location", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", "name", "metadata", "created_at", "model_artifact_ids", "flavor", "schema_location", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - model_artifact_ids: typing.Union[ - MetaOapg.properties.model_artifact_ids, - list, - tuple, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - schema_location: typing.Union[MetaOapg.properties.schema_location, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, ], + model_artifact_ids: typing.Union[MetaOapg.properties.model_artifact_ids, list, tuple, ], + name: typing.Union[MetaOapg.properties.name, str, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, datetime, ], + id: typing.Union[MetaOapg.properties.id, str, ], + schema_location: typing.Union[MetaOapg.properties.schema_location, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - 
None, - list, - tuple, - bytes, - ], - ) -> "ModelBundleV2Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelBundleV2Response': return super().__new__( cls, *_args, @@ -351,7 +262,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.cloudpickle_artifact_flavor import ( CloudpickleArtifactFlavor, ) diff --git a/launch/api_client/model/model_bundle_v2_response.pyi b/launch/api_client/model/model_bundle_v2_response.pyi deleted file mode 100644 index 13b0f331..00000000 --- a/launch/api_client/model/model_bundle_v2_response.pyi +++ /dev/null @@ -1,324 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelBundleV2Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Response object for a single Model Bundle. 
- """ - - class MetaOapg: - required = { - "flavor", - "metadata", - "model_artifact_ids", - "name", - "created_at", - "id", - } - - class properties: - created_at = schemas.DateTimeSchema - - class flavor( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - CloudpickleArtifactFlavor, - ZipArtifactFlavor, - RunnableImageFlavor, - StreamingEnhancedRunnableImageFlavor, - TritonEnhancedRunnableImageFlavor, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "flavor": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - id = schemas.StrSchema - metadata = schemas.DictSchema - - class model_artifact_ids(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "model_artifact_ids": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def 
__getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - name = schemas.StrSchema - schema_location = schemas.StrSchema - __annotations__ = { - "created_at": created_at, - "flavor": flavor, - "id": id, - "metadata": metadata, - "model_artifact_ids": model_artifact_ids, - "name": name, - "schema_location": schema_location, - } - flavor: MetaOapg.properties.flavor - metadata: MetaOapg.properties.metadata - model_artifact_ids: MetaOapg.properties.model_artifact_ids - name: MetaOapg.properties.name - created_at: MetaOapg.properties.created_at - id: MetaOapg.properties.id - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["schema_location"] - ) -> MetaOapg.properties.schema_location: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "flavor", - "id", - "metadata", - "model_artifact_ids", - "name", - "schema_location", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... 
- @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_artifact_ids"] - ) -> MetaOapg.properties.model_artifact_ids: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["schema_location"] - ) -> typing.Union[MetaOapg.properties.schema_location, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "created_at", - "flavor", - "id", - "metadata", - "model_artifact_ids", - "name", - "schema_location", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - metadata: typing.Union[ - MetaOapg.properties.metadata, - dict, - frozendict.frozendict, - ], - model_artifact_ids: typing.Union[ - MetaOapg.properties.model_artifact_ids, - list, - tuple, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - created_at: typing.Union[ - MetaOapg.properties.created_at, - str, - datetime, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], - schema_location: typing.Union[MetaOapg.properties.schema_location, str, schemas.Unset] = 
schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelBundleV2Response": - return super().__new__( - cls, - *_args, - flavor=flavor, - metadata=metadata, - model_artifact_ids=model_artifact_ids, - name=name, - created_at=created_at, - id=id, - schema_location=schema_location, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.cloudpickle_artifact_flavor import ( - CloudpickleArtifactFlavor, -) -from launch_client.model.runnable_image_flavor import RunnableImageFlavor -from launch_client.model.streaming_enhanced_runnable_image_flavor import ( - StreamingEnhancedRunnableImageFlavor, -) -from launch_client.model.triton_enhanced_runnable_image_flavor import ( - TritonEnhancedRunnableImageFlavor, -) -from launch_client.model.zip_artifact_flavor import ZipArtifactFlavor diff --git a/launch/api_client/model/model_download_request.py b/launch/api_client/model/model_download_request.py index 097efc84..6b0dbca7 100644 --- a/launch/api_client/model/model_download_request.py +++ b/launch/api_client/model/model_download_request.py @@ -23,108 +23,85 @@ from launch.api_client import schemas # noqa: F401 -class ModelDownloadRequest(schemas.DictSchema): +class ModelDownloadRequest( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "model_name", } - + class properties: model_name = schemas.StrSchema - download_format = schemas.StrSchema + + + class download_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'download_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "model_name": model_name, "download_format": download_format, } - + model_name: MetaOapg.properties.model_name - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["download_format"]) -> MetaOapg.properties.download_format: - ... - + def __getitem__(self, name: typing_extensions.Literal["download_format"]) -> MetaOapg.properties.download_format: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_name", - "download_format", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_name", "download_format", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["download_format"] - ) -> typing.Union[MetaOapg.properties.download_format, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["download_format"]) -> typing.Union[MetaOapg.properties.download_format, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_name", - "download_format", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_name", "download_format", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_name: typing.Union[ - MetaOapg.properties.model_name, - str, - ], - download_format: typing.Union[MetaOapg.properties.download_format, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + model_name: typing.Union[MetaOapg.properties.model_name, str, ], + download_format: typing.Union[MetaOapg.properties.download_format, None, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelDownloadRequest": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelDownloadRequest': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/model_download_request.pyi b/launch/api_client/model/model_download_request.pyi deleted file 
mode 100644 index be2d50bd..00000000 --- a/launch/api_client/model/model_download_request.pyi +++ /dev/null @@ -1,120 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelDownloadRequest(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "model_name", - } - - class properties: - model_name = schemas.StrSchema - download_format = schemas.StrSchema - __annotations__ = { - "model_name": model_name, - "download_format": download_format, - } - model_name: MetaOapg.properties.model_name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["download_format"] - ) -> MetaOapg.properties.download_format: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_name", - "download_format", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["download_format"] - ) -> typing.Union[MetaOapg.properties.download_format, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "model_name", - "download_format", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - model_name: typing.Union[ - MetaOapg.properties.model_name, - str, - ], - download_format: typing.Union[MetaOapg.properties.download_format, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelDownloadRequest": - return super().__new__( - cls, - *_args, - model_name=model_name, - download_format=download_format, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_download_response.py b/launch/api_client/model/model_download_response.py index 12a89fbd..2f631380 100644 --- a/launch/api_client/model/model_download_response.py +++ b/launch/api_client/model/model_download_response.py @@ -23,125 +23,85 @@ from launch.api_client import schemas # noqa: F401 -class ModelDownloadResponse(schemas.DictSchema): +class ModelDownloadResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { "urls", } - + class properties: - class urls(schemas.DictSchema): + + + class urls( + schemas.DictSchema + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "urls": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'urls': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - __annotations__ = { "urls": urls, } - + urls: MetaOapg.properties.urls - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: - ... - + def __getitem__(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["urls",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["urls", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["urls",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["urls", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - urls: typing.Union[ - MetaOapg.properties.urls, - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + urls: typing.Union[MetaOapg.properties.urls, dict, frozendict.frozendict, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelDownloadResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelDownloadResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/model_download_response.pyi b/launch/api_client/model/model_download_response.pyi deleted file mode 100644 index 1e316c7d..00000000 --- a/launch/api_client/model/model_download_response.pyi +++ /dev/null @@ -1,134 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import 
re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelDownloadResponse(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "urls", - } - - class properties: - class urls(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "urls": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "urls": urls, - } - urls: MetaOapg.properties.urls - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["urls",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["urls"]) -> MetaOapg.properties.urls: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["urls",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - urls: typing.Union[ - MetaOapg.properties.urls, - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelDownloadResponse": - return super().__new__( - cls, - *_args, - urls=urls, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_endpoint_deployment_state.py b/launch/api_client/model/model_endpoint_deployment_state.py index c0a13dff..db50ffc6 100644 --- a/launch/api_client/model/model_endpoint_deployment_state.py +++ b/launch/api_client/model/model_endpoint_deployment_state.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ModelEndpointDeploymentState(schemas.DictSchema): +class ModelEndpointDeploymentState( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,181 +34,167 @@ class ModelEndpointDeploymentState(schemas.DictSchema): This is the entity-layer class for the deployment settings related to a Model Endpoint. 
""" + class MetaOapg: required = { "max_workers", "min_workers", + "concurrent_requests_per_worker", "per_worker", } - + class properties: - class max_workers(schemas.IntSchema): + + + class min_workers( + schemas.IntSchema + ): + + class MetaOapg: inclusive_minimum = 0 - - class min_workers(schemas.IntSchema): + + + class max_workers( + schemas.IntSchema + ): + + class MetaOapg: inclusive_minimum = 0 - per_worker = schemas.IntSchema - - class available_workers(schemas.IntSchema): + concurrent_requests_per_worker = schemas.IntSchema + + + class available_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_minimum = 0 - - class unavailable_workers(schemas.IntSchema): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'available_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class unavailable_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_minimum = 0 - + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'unavailable_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "max_workers": max_workers, "min_workers": min_workers, + "max_workers": max_workers, "per_worker": per_worker, + "concurrent_requests_per_worker": concurrent_requests_per_worker, "available_workers": available_workers, "unavailable_workers": unavailable_workers, } - + max_workers: MetaOapg.properties.max_workers min_workers: MetaOapg.properties.min_workers + concurrent_requests_per_worker: MetaOapg.properties.concurrent_requests_per_worker per_worker: MetaOapg.properties.per_worker - + @typing.overload - def __getitem__(self, name: 
typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["available_workers"] - ) -> MetaOapg.properties.available_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["unavailable_workers"] - ) -> MetaOapg.properties.unavailable_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["available_workers"]) -> MetaOapg.properties.available_workers: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_workers", - "min_workers", - "per_worker", - "available_workers", - "unavailable_workers", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["unavailable_workers"]) -> MetaOapg.properties.unavailable_workers: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "available_workers", "unavailable_workers", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["available_workers"] - ) -> typing.Union[MetaOapg.properties.available_workers, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["unavailable_workers"] - ) -> typing.Union[MetaOapg.properties.unavailable_workers, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["available_workers"]) -> typing.Union[MetaOapg.properties.available_workers, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_workers", - "min_workers", - "per_worker", - "available_workers", - "unavailable_workers", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["unavailable_workers"]) -> typing.Union[MetaOapg.properties.unavailable_workers, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "available_workers", "unavailable_workers", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_workers: typing.Union[ - MetaOapg.properties.max_workers, - decimal.Decimal, - int, - ], - min_workers: typing.Union[ - MetaOapg.properties.min_workers, - decimal.Decimal, - int, - ], - per_worker: typing.Union[ - MetaOapg.properties.per_worker, - decimal.Decimal, - int, - ], - available_workers: typing.Union[ - MetaOapg.properties.available_workers, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - unavailable_workers: typing.Union[ - MetaOapg.properties.unavailable_workers, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, ], + min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, ], + concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, decimal.Decimal, int, ], + per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, ], + available_workers: typing.Union[MetaOapg.properties.available_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + unavailable_workers: 
typing.Union[MetaOapg.properties.unavailable_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelEndpointDeploymentState": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelEndpointDeploymentState': return super().__new__( cls, *_args, max_workers=max_workers, min_workers=min_workers, + concurrent_requests_per_worker=concurrent_requests_per_worker, per_worker=per_worker, available_workers=available_workers, unavailable_workers=unavailable_workers, diff --git a/launch/api_client/model/model_endpoint_deployment_state.pyi b/launch/api_client/model/model_endpoint_deployment_state.pyi deleted file mode 100644 index ff9c7ac0..00000000 --- a/launch/api_client/model/model_endpoint_deployment_state.pyi +++ /dev/null @@ -1,180 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelEndpointDeploymentState(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- - This is the entity-layer class for the deployment settings related to a Model Endpoint. - """ - - class MetaOapg: - required = { - "max_workers", - "min_workers", - "per_worker", - } - - class properties: - class max_workers(schemas.IntSchema): - pass - - class min_workers(schemas.IntSchema): - pass - per_worker = schemas.IntSchema - - class available_workers(schemas.IntSchema): - pass - - class unavailable_workers(schemas.IntSchema): - pass - __annotations__ = { - "max_workers": max_workers, - "min_workers": min_workers, - "per_worker": per_worker, - "available_workers": available_workers, - "unavailable_workers": unavailable_workers, - } - max_workers: MetaOapg.properties.max_workers - min_workers: MetaOapg.properties.min_workers - per_worker: MetaOapg.properties.per_worker - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["available_workers"] - ) -> MetaOapg.properties.available_workers: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["unavailable_workers"] - ) -> MetaOapg.properties.unavailable_workers: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_workers", - "min_workers", - "per_worker", - "available_workers", - "unavailable_workers", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
- @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["available_workers"] - ) -> typing.Union[MetaOapg.properties.available_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["unavailable_workers"] - ) -> typing.Union[MetaOapg.properties.unavailable_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "max_workers", - "min_workers", - "per_worker", - "available_workers", - "unavailable_workers", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - max_workers: typing.Union[ - MetaOapg.properties.max_workers, - decimal.Decimal, - int, - ], - min_workers: typing.Union[ - MetaOapg.properties.min_workers, - decimal.Decimal, - int, - ], - per_worker: typing.Union[ - MetaOapg.properties.per_worker, - decimal.Decimal, - int, - ], - available_workers: typing.Union[ - MetaOapg.properties.available_workers, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - unavailable_workers: typing.Union[ - MetaOapg.properties.unavailable_workers, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelEndpointDeploymentState": - return super().__new__( - cls, - *_args, - max_workers=max_workers, - 
min_workers=min_workers, - per_worker=per_worker, - available_workers=available_workers, - unavailable_workers=unavailable_workers, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/model_endpoint_order_by.py b/launch/api_client/model/model_endpoint_order_by.py index e2eb86a1..a14d1946 100644 --- a/launch/api_client/model/model_endpoint_order_by.py +++ b/launch/api_client/model/model_endpoint_order_by.py @@ -23,7 +23,10 @@ from launch.api_client import schemas # noqa: F401 -class ModelEndpointOrderBy(schemas.EnumBase, schemas.StrSchema): +class ModelEndpointOrderBy( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,21 +35,22 @@ class ModelEndpointOrderBy(schemas.EnumBase, schemas.StrSchema): The canonical list of possible orderings of Model Bundles. """ + class MetaOapg: enum_value_to_name = { "newest": "NEWEST", "oldest": "OLDEST", "alphabetical": "ALPHABETICAL", } - + @schemas.classproperty def NEWEST(cls): return cls("newest") - + @schemas.classproperty def OLDEST(cls): return cls("oldest") - + @schemas.classproperty def ALPHABETICAL(cls): return cls("alphabetical") diff --git a/launch/api_client/model/model_endpoint_resource_state.py b/launch/api_client/model/model_endpoint_resource_state.py index f6ed43ff..a613f5af 100644 --- a/launch/api_client/model/model_endpoint_resource_state.py +++ b/launch/api_client/model/model_endpoint_resource_state.py @@ -23,31 +23,45 @@ from launch.api_client import schemas # noqa: F401 -class ModelEndpointResourceState(schemas.DictSchema): +class ModelEndpointResourceState( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. This is the entity-layer class for the resource settings per worker of a Model Endpoint. +Note: in the multinode case, there are multiple "nodes" per "worker". 
+"Nodes" is analogous to a single k8s pod that may take up all the GPUs on a single machine. +"Workers" is the smallest unit that a request can be made to, and consists of one leader "node" and +multiple follower "nodes" (named "worker" in the k8s LeaderWorkerSet definition). +cpus/gpus/memory/storage are per-node, thus the total consumption by a "worker" +is cpus/gpus/etc. multiplied by nodes_per_worker. """ + class MetaOapg: required = { "memory", "cpus", "gpus", + "nodes_per_worker", } - + class properties: + + class cpus( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -63,64 +77,41 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - class gpus(schemas.IntSchema): + + + class gpus( + schemas.IntSchema + ): + + class MetaOapg: inclusive_minimum = 0 - + + class memory( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = 
schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -136,66 +127,45 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class nodes_per_worker( + schemas.IntSchema + ): + + + class MetaOapg: + inclusive_minimum = 1 + @staticmethod - def gpu_type() -> typing.Type["GpuType"]: + def gpu_type() -> typing.Type['GpuType']: return GpuType - - optimize_costs = schemas.BoolSchema - + + class storage( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -211,259 +181,137 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "cpus": cpus, "gpus": gpus, "memory": memory, + "nodes_per_worker": nodes_per_worker, "gpu_type": gpu_type, - "optimize_costs": optimize_costs, "storage": storage, + "optimize_costs": optimize_costs, } - + memory: MetaOapg.properties.memory cpus: MetaOapg.properties.cpus gpus: MetaOapg.properties.gpus - + nodes_per_worker: MetaOapg.properties.nodes_per_worker + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... - + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpus", - "memory", - "gpu_type", - "optimize_costs", - "storage", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["cpus", "gpus", "memory", "nodes_per_worker", "gpu_type", "storage", "optimize_costs", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... 
+ @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpus", - "memory", - "gpu_type", - "optimize_costs", - "storage", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cpus", "gpus", "memory", "nodes_per_worker", "gpu_type", "storage", "optimize_costs", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - gpus: typing.Union[ - MetaOapg.properties.gpus, - decimal.Decimal, - int, - ], - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, ], + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, decimal.Decimal, int, ], + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, 
+ storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelEndpointResourceState": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ModelEndpointResourceState': return super().__new__( cls, *_args, memory=memory, cpus=cpus, gpus=gpus, + nodes_per_worker=nodes_per_worker, gpu_type=gpu_type, - optimize_costs=optimize_costs, storage=storage, + optimize_costs=optimize_costs, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/model_endpoint_resource_state.pyi b/launch/api_client/model/model_endpoint_resource_state.pyi deleted file mode 100644 index 01d11482..00000000 --- a/launch/api_client/model/model_endpoint_resource_state.pyi +++ /dev/null @@ -1,428 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client 
import schemas # noqa: F401 - -class ModelEndpointResourceState(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the resource settings per worker of a Model Endpoint. - """ - - class MetaOapg: - required = { - "memory", - "cpus", - "gpus", - } - - class properties: - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - class gpus(schemas.IntSchema): - pass - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import 
statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - optimize_costs = schemas.BoolSchema - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "cpus": cpus, - "gpus": gpus, - "memory": memory, - "gpu_type": gpu_type, - "optimize_costs": optimize_costs, - "storage": storage, - } - memory: MetaOapg.properties.memory - cpus: MetaOapg.properties.cpus - gpus: MetaOapg.properties.gpus - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpus", - "memory", - "gpu_type", - "optimize_costs", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cpus", - "gpus", - "memory", - "gpu_type", - "optimize_costs", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - gpus: typing.Union[ - MetaOapg.properties.gpus, - decimal.Decimal, - int, - ], - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ModelEndpointResourceState": - return super().__new__( - cls, - *_args, - memory=memory, - cpus=cpus, - gpus=gpus, - gpu_type=gpu_type, - optimize_costs=optimize_costs, - storage=storage, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/model_endpoint_status.py b/launch/api_client/model/model_endpoint_status.py index 
8703e65a..7623e0a0 100644 --- a/launch/api_client/model/model_endpoint_status.py +++ b/launch/api_client/model/model_endpoint_status.py @@ -23,15 +23,17 @@ from launch.api_client import schemas # noqa: F401 -class ModelEndpointStatus(schemas.EnumBase, schemas.StrSchema): +class ModelEndpointStatus( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. """ + class MetaOapg: enum_value_to_name = { "READY": "READY", @@ -40,23 +42,23 @@ class MetaOapg: "UPDATE_FAILED": "UPDATE_FAILED", "DELETE_IN_PROGRESS": "DELETE_IN_PROGRESS", } - + @schemas.classproperty def READY(cls): return cls("READY") - + @schemas.classproperty def UPDATE_PENDING(cls): return cls("UPDATE_PENDING") - + @schemas.classproperty def UPDATE_IN_PROGRESS(cls): return cls("UPDATE_IN_PROGRESS") - + @schemas.classproperty def UPDATE_FAILED(cls): return cls("UPDATE_FAILED") - + @schemas.classproperty def DELETE_IN_PROGRESS(cls): return cls("DELETE_IN_PROGRESS") diff --git a/launch/api_client/model/model_endpoint_status.pyi b/launch/api_client/model/model_endpoint_status.pyi deleted file mode 100644 index 8f1377bd..00000000 --- a/launch/api_client/model/model_endpoint_status.pyi +++ /dev/null @@ -1,47 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelEndpointStatus(schemas.EnumBase, schemas.StrSchema): - """NOTE: This 
class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An enumeration. - """ - - @schemas.classproperty - def READY(cls): - return cls("READY") - @schemas.classproperty - def UPDATE_PENDING(cls): - return cls("UPDATE_PENDING") - @schemas.classproperty - def UPDATE_IN_PROGRESS(cls): - return cls("UPDATE_IN_PROGRESS") - @schemas.classproperty - def UPDATE_FAILED(cls): - return cls("UPDATE_FAILED") - @schemas.classproperty - def DELETE_IN_PROGRESS(cls): - return cls("DELETE_IN_PROGRESS") diff --git a/launch/api_client/model/model_endpoint_type.py b/launch/api_client/model/model_endpoint_type.py index ec2773b4..8122d04d 100644 --- a/launch/api_client/model/model_endpoint_type.py +++ b/launch/api_client/model/model_endpoint_type.py @@ -23,30 +23,32 @@ from launch.api_client import schemas # noqa: F401 -class ModelEndpointType(schemas.EnumBase, schemas.StrSchema): +class ModelEndpointType( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. 
""" + class MetaOapg: enum_value_to_name = { "async": "ASYNC", "sync": "SYNC", "streaming": "STREAMING", } - + @schemas.classproperty def ASYNC(cls): return cls("async") - + @schemas.classproperty def SYNC(cls): return cls("sync") - + @schemas.classproperty def STREAMING(cls): return cls("streaming") diff --git a/launch/api_client/model/model_endpoint_type.pyi b/launch/api_client/model/model_endpoint_type.pyi deleted file mode 100644 index cc9d910f..00000000 --- a/launch/api_client/model/model_endpoint_type.pyi +++ /dev/null @@ -1,41 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ModelEndpointType(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An enumeration. 
- """ - - @schemas.classproperty - def ASYNC(cls): - return cls("async") - @schemas.classproperty - def SYNC(cls): - return cls("sync") - @schemas.classproperty - def STREAMING(cls): - return cls("streaming") diff --git a/launch/api_client/model/response_schema.pyi b/launch/api_client/model/parallel_tool_calls.py similarity index 85% rename from launch/api_client/model/response_schema.pyi rename to launch/api_client/model/parallel_tool_calls.py index 800305f1..0649343d 100644 --- a/launch/api_client/model/response_schema.pyi +++ b/launch/api_client/model/parallel_tool_calls.py @@ -19,6 +19,7 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -ResponseSchema = schemas.AnyTypeSchema +from launch.api_client import schemas # noqa: F401 + +ParallelToolCalls = schemas.BoolSchema diff --git a/launch/api_client/model/prediction_content.py b/launch/api_client/model/prediction_content.py new file mode 100644 index 00000000..cb956908 --- /dev/null +++ b/launch/api_client/model/prediction_content.py @@ -0,0 +1,149 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class PredictionContent( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "type", + "content", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "content": "CONTENT", + } + + @schemas.classproperty + def CONTENT(cls): + return cls("content") + + + class content( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + Content8, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'content': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + __annotations__ = { + "type": type, + "content": content, + } + + type: MetaOapg.properties.type + content: MetaOapg.properties.content + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "content", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> MetaOapg.properties.content: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "content", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + type: typing.Union[MetaOapg.properties.type, str, ], + content: typing.Union[MetaOapg.properties.content, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'PredictionContent': + return super().__new__( + cls, + *_args, + type=type, + content=content, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.content8 import Content8 diff --git a/launch/api_client/model/prompt.py b/launch/api_client/model/prompt.py new file mode 100644 index 00000000..7a7c3a52 --- /dev/null +++ b/launch/api_client/model/prompt.py @@ -0,0 +1,58 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: 
F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Prompt( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + +Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + + """ + + + class MetaOapg: + items = schemas.IntSchema + min_items = 1 + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) diff --git a/launch/api_client/model/prompt1.py b/launch/api_client/model/prompt1.py new file mode 100644 index 00000000..62168649 --- /dev/null +++ b/launch/api_client/model/prompt1.py @@ -0,0 +1,63 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class Prompt1( + schemas.ListBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneTupleMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. + +Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. + + """ + + + class MetaOapg: + + @staticmethod + def items() -> typing.Type['Prompt1Item']: + return Prompt1Item + min_items = 1 + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Prompt1': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + +from launch.api_client.model.prompt1_item import Prompt1Item diff --git a/launch/api_client/model/model_bundle_framework_type.pyi b/launch/api_client/model/prompt1_item.py similarity index 50% rename from launch/api_client/model/model_bundle_framework_type.pyi rename to launch/api_client/model/prompt1_item.py index 6dac94df..287b2355 100644 --- a/launch/api_client/model/model_bundle_framework_type.pyi +++ b/launch/api_client/model/prompt1_item.py @@ -19,23 +19,34 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class ModelBundleFrameworkType(schemas.EnumBase, schemas.StrSchema): +from launch.api_client import schemas # noqa: F401 + + +class Prompt1Item( + schemas.ListSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - The canonical list of possible machine learning frameworks of Model Bundles. 
""" - @schemas.classproperty - def PYTORCH(cls): - return cls("pytorch") - @schemas.classproperty - def TENSORFLOW(cls): - return cls("tensorflow") - @schemas.classproperty - def CUSTOM_BASE_IMAGE(cls): - return cls("custom_base_image") + + class MetaOapg: + min_items = 1 + items = schemas.IntSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, decimal.Decimal, int, ]], typing.List[typing.Union[MetaOapg.items, decimal.Decimal, int, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'Prompt1Item': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) diff --git a/launch/api_client/model/prompt_tokens_details.py b/launch/api_client/model/prompt_tokens_details.py new file mode 100644 index 00000000..3f253fa9 --- /dev/null +++ b/launch/api_client/model/prompt_tokens_details.py @@ -0,0 +1,88 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class PromptTokensDetails( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + audio_tokens = schemas.IntSchema + cached_tokens = schemas.IntSchema + __annotations__ = { + "audio_tokens": audio_tokens, + "cached_tokens": cached_tokens, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["audio_tokens"]) -> MetaOapg.properties.audio_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cached_tokens"]) -> MetaOapg.properties.cached_tokens: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["audio_tokens", "cached_tokens", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["audio_tokens"]) -> typing.Union[MetaOapg.properties.audio_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cached_tokens"]) -> typing.Union[MetaOapg.properties.cached_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["audio_tokens", "cached_tokens", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + audio_tokens: typing.Union[MetaOapg.properties.audio_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, + cached_tokens: typing.Union[MetaOapg.properties.cached_tokens, decimal.Decimal, int, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'PromptTokensDetails': + return super().__new__( + cls, + *_args, + audio_tokens=audio_tokens, + cached_tokens=cached_tokens, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/pytorch_framework.py b/launch/api_client/model/pytorch_framework.py index 08ccf102..d5e96a8e 100644 --- a/launch/api_client/model/pytorch_framework.py +++ b/launch/api_client/model/pytorch_framework.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class PytorchFramework(schemas.DictSchema): +class PytorchFramework( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,117 +34,74 @@ class PytorchFramework(schemas.DictSchema): This is the entity-layer class for a Pytorch framework specification. 
""" + class MetaOapg: required = { "pytorch_image_tag", "framework_type", } - + class properties: - class framework_type(schemas.EnumBase, schemas.StrSchema): + + + class framework_type( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { "pytorch": "PYTORCH", } - + @schemas.classproperty def PYTORCH(cls): return cls("pytorch") - pytorch_image_tag = schemas.StrSchema __annotations__ = { "framework_type": framework_type, "pytorch_image_tag": pytorch_image_tag, } - + pytorch_image_tag: MetaOapg.properties.pytorch_image_tag framework_type: MetaOapg.properties.framework_type - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: - ... - + def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> MetaOapg.properties.pytorch_image_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> MetaOapg.properties.pytorch_image_tag: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "pytorch_image_tag", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> MetaOapg.properties.pytorch_image_tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["pytorch_image_tag"]) -> MetaOapg.properties.pytorch_image_tag: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "pytorch_image_tag", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "pytorch_image_tag", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - pytorch_image_tag: typing.Union[ - MetaOapg.properties.pytorch_image_tag, - str, - ], - framework_type: typing.Union[ - MetaOapg.properties.framework_type, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + pytorch_image_tag: typing.Union[MetaOapg.properties.pytorch_image_tag, str, ], + framework_type: typing.Union[MetaOapg.properties.framework_type, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "PytorchFramework": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'PytorchFramework': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/pytorch_framework.pyi b/launch/api_client/model/pytorch_framework.pyi deleted file mode 100644 index 422a2256..00000000 --- a/launch/api_client/model/pytorch_framework.pyi 
+++ /dev/null @@ -1,132 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class PytorchFramework(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a Pytorch framework specification. - """ - - class MetaOapg: - required = { - "pytorch_image_tag", - "framework_type", - } - - class properties: - class framework_type(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def PYTORCH(cls): - return cls("pytorch") - pytorch_image_tag = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "pytorch_image_tag": pytorch_image_tag, - } - pytorch_image_tag: MetaOapg.properties.pytorch_image_tag - framework_type: MetaOapg.properties.framework_type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> MetaOapg.properties.pytorch_image_tag: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "pytorch_image_tag", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["framework_type"] - ) -> MetaOapg.properties.framework_type: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["pytorch_image_tag"] - ) -> MetaOapg.properties.pytorch_image_tag: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "pytorch_image_tag", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - pytorch_image_tag: typing.Union[ - MetaOapg.properties.pytorch_image_tag, - str, - ], - framework_type: typing.Union[ - MetaOapg.properties.framework_type, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "PytorchFramework": - return super().__new__( - cls, - *_args, - pytorch_image_tag=pytorch_image_tag, - framework_type=framework_type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/quantization.py b/launch/api_client/model/quantization.py index 15bdbdbd..f9564c4a 100644 --- a/launch/api_client/model/quantization.py +++ b/launch/api_client/model/quantization.py @@ -23,25 +23,27 @@ from launch.api_client import schemas # noqa: F401 -class Quantization(schemas.EnumBase, schemas.StrSchema): +class Quantization( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. """ + class MetaOapg: enum_value_to_name = { "bitsandbytes": "BITSANDBYTES", "awq": "AWQ", } - + @schemas.classproperty def BITSANDBYTES(cls): return cls("bitsandbytes") - + @schemas.classproperty def AWQ(cls): return cls("awq") diff --git a/launch/api_client/model/quantization.pyi b/launch/api_client/model/quantization.pyi deleted file mode 100644 index 84f7bee8..00000000 --- a/launch/api_client/model/quantization.pyi +++ /dev/null @@ -1,38 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class Quantization(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An enumeration. 
- """ - - @schemas.classproperty - def BITSANDBYTES(cls): - return cls("bitsandbytes") - @schemas.classproperty - def AWQ(cls): - return cls("awq") diff --git a/launch/api_client/model/reasoning_effort.py b/launch/api_client/model/reasoning_effort.py new file mode 100644 index 00000000..ae2994da --- /dev/null +++ b/launch/api_client/model/reasoning_effort.py @@ -0,0 +1,78 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ReasoningEffort( + schemas.EnumBase, + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + **o-series models only** + +Constrains effort on reasoning for +[reasoning models](https://platform.openai.com/docs/guides/reasoning). +Currently supported values are `low`, `medium`, and `high`. Reducing +reasoning effort can result in faster responses and fewer tokens used +on reasoning in a response. 
+ + """ + + + class MetaOapg: + enum_value_to_name = { + "low": "LOW", + "medium": "MEDIUM", + "high": "HIGH", + } + + @schemas.classproperty + def LOW(cls): + return cls("low") + + @schemas.classproperty + def MEDIUM(cls): + return cls("medium") + + @schemas.classproperty + def HIGH(cls): + return cls("high") + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ReasoningEffort': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) diff --git a/launch/api_client/model/get_docker_image_batch_job_v1_response.pyi b/launch/api_client/model/response_format_json_object.py similarity index 50% rename from launch/api_client/model/get_docker_image_batch_job_v1_response.pyi rename to launch/api_client/model/response_format_json_object.py index 89c227bd..0006c89a 100644 --- a/launch/api_client/model/get_docker_image_batch_job_v1_response.pyi +++ b/launch/api_client/model/response_format_json_object.py @@ -19,85 +19,80 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class GetDockerImageBatchJobV1Response(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class ResponseFormatJsonObject( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: required = { - "status", + "type", } - + class properties: - @staticmethod - def status() -> typing.Type["BatchJobStatus"]: - return BatchJobStatus + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "json_object": "JSON_OBJECT", + } + + @schemas.classproperty + def JSON_OBJECT(cls): + return cls("json_object") __annotations__ = { - "status": status, + "type": type, } - status: "BatchJobStatus" - + + type: MetaOapg.properties.type + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["status",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "BatchJobStatus": ... + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["status",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - status: "BatchJobStatus", + *_args: typing.Union[dict, frozendict.frozendict, ], + type: typing.Union[MetaOapg.properties.type, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "GetDockerImageBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ResponseFormatJsonObject': return super().__new__( cls, *_args, - status=status, + type=type, _configuration=_configuration, **kwargs, ) - -from launch_client.model.batch_job_status import BatchJobStatus diff --git a/launch/api_client/model/response_format_json_schema.py b/launch/api_client/model/response_format_json_schema.py new file mode 100644 index 00000000..6173317a --- /dev/null +++ b/launch/api_client/model/response_format_json_schema.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + 
+ +class ResponseFormatJsonSchema( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "json_schema", + "type", + } + + class properties: + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "json_schema": "JSON_SCHEMA", + } + + @schemas.classproperty + def JSON_SCHEMA(cls): + return cls("json_schema") + + @staticmethod + def json_schema() -> typing.Type['JsonSchema']: + return JsonSchema + __annotations__ = { + "type": type, + "json_schema": json_schema, + } + + json_schema: 'JsonSchema' + type: MetaOapg.properties.type + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["json_schema"]) -> 'JsonSchema': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "json_schema", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["json_schema"]) -> 'JsonSchema': ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "json_schema", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + json_schema: 'JsonSchema', + type: typing.Union[MetaOapg.properties.type, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ResponseFormatJsonSchema': + return super().__new__( + cls, + *_args, + json_schema=json_schema, + type=type, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.json_schema import JsonSchema diff --git a/launch/api_client/model/response_format_json_schema_schema.py b/launch/api_client/model/response_format_json_schema_schema.py new file mode 100644 index 00000000..9f7aec5c --- /dev/null +++ b/launch/api_client/model/response_format_json_schema_schema.py @@ -0,0 +1,57 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ResponseFormatJsonSchemaSchema( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'ResponseFormatJsonSchemaSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/delete_file_response.pyi b/launch/api_client/model/response_format_text.py similarity index 50% rename from launch/api_client/model/delete_file_response.pyi rename to launch/api_client/model/response_format_text.py index f4e999da..f9a2c24e 100644 --- a/launch/api_client/model/delete_file_response.pyi +++ b/launch/api_client/model/response_format_text.py @@ -19,86 +19,80 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class DeleteFileResponse(schemas.DictSchema): +from launch.api_client import schemas # noqa: F401 + + +class ResponseFormatText( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - Response object for deleting a file. 
""" + class MetaOapg: required = { - "deleted", + "type", } - + class properties: - deleted = schemas.BoolSchema + + + class type( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "text": "TEXT", + } + + @schemas.classproperty + def TEXT(cls): + return cls("text") __annotations__ = { - "deleted": deleted, + "type": type, } - deleted: MetaOapg.properties.deleted - + + type: MetaOapg.properties.type + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + @typing.overload def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", ], str]): # dict_instance[name] accessor return super().__getitem__(name) + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["deleted"]) -> MetaOapg.properties.deleted: ... + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + @typing.overload def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["deleted",], - str, - ], - ): + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", ], str]): return super().get_item_oapg(name) + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - deleted: typing.Union[ - MetaOapg.properties.deleted, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + type: typing.Union[MetaOapg.properties.type, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "DeleteFileResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ResponseFormatText': return super().__new__( cls, *_args, - deleted=deleted, + type=type, _configuration=_configuration, **kwargs, ) diff --git a/launch/api_client/model/response_modalities.py b/launch/api_client/model/response_modalities.py new file mode 100644 index 00000000..7e7a7b41 --- /dev/null +++ b/launch/api_client/model/response_modalities.py @@ -0,0 +1,84 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ResponseModalities( + schemas.ListBase, + 
schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Output types that you would like the model to generate. +Most models are capable of generating text, which is the default: + +`["text"]` + +The `gpt-4o-audio-preview` model can also be used to +[generate audio](/docs/guides/audio). To request that this model generate +both text and audio responses, you can use: + +`["text", "audio"]` + + """ + + + class MetaOapg: + + + class items( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "text": "TEXT", + "audio": "AUDIO", + } + + @schemas.classproperty + def TEXT(cls): + return cls("text") + + @schemas.classproperty + def AUDIO(cls): + return cls("audio") + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ResponseModalities': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) diff --git a/launch/api_client/model/restart_model_endpoint_v1_response.py b/launch/api_client/model/restart_model_endpoint_v1_response.py new file mode 100644 index 00000000..9ba6e0a8 --- /dev/null +++ b/launch/api_client/model/restart_model_endpoint_v1_response.py @@ -0,0 +1,83 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class 
RestartModelEndpointV1Response( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "restarted", + } + + class properties: + restarted = schemas.BoolSchema + __annotations__ = { + "restarted": restarted, + } + + restarted: MetaOapg.properties.restarted + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["restarted"]) -> MetaOapg.properties.restarted: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["restarted", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["restarted"]) -> MetaOapg.properties.restarted: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["restarted", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + restarted: typing.Union[MetaOapg.properties.restarted, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'RestartModelEndpointV1Response': + return super().__new__( + cls, + *_args, + restarted=restarted, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/runnable_image_flavor.py b/launch/api_client/model/runnable_image_flavor.py index 0e05bb9f..46726cb4 100644 --- a/launch/api_client/model/runnable_image_flavor.py +++ b/launch/api_client/model/runnable_image_flavor.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class RunnableImageFlavor(schemas.DictSchema): +class RunnableImageFlavor( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class RunnableImageFlavor(schemas.DictSchema): This is the entity-layer class for the Model Bundle flavor of a runnable image. 
""" + class MetaOapg: required = { "flavor", @@ -40,301 +43,366 @@ class MetaOapg: "repository", "command", } - + class properties: - class command(schemas.ListSchema): + repository = schemas.StrSchema + tag = schemas.StrSchema + + + class command( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": + ) -> 'command': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - class flavor(schemas.EnumBase, schemas.StrSchema): + + + class protocol( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { - "runnable_image": "RUNNABLE_IMAGE", + "http": "HTTP", } - + @schemas.classproperty - def RUNNABLE_IMAGE(cls): - return cls("runnable_image") - - class protocol(schemas.EnumBase, schemas.StrSchema): + def HTTP(cls): + return cls("http") + + + class flavor( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { - "http": "HTTP", + "runnable_image": "RUNNABLE_IMAGE", } - + @schemas.classproperty - def HTTP(cls): - return cls("http") - - repository = schemas.StrSchema - tag = schemas.StrSchema - - class env(schemas.DictSchema): + def RUNNABLE_IMAGE(cls): + return cls("runnable_image") + predict_route = schemas.StrSchema + healthcheck_route = schemas.StrSchema + + + class env( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> 
MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'env': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - healthcheck_route = schemas.StrSchema - predict_route = schemas.StrSchema readiness_initial_delay_seconds = schemas.IntSchema + + + class extra_routes( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'extra_routes': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + + class routes( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'routes': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + + class 
forwarder_type( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'forwarder_type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class worker_command( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'worker_command': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class worker_env( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'worker_env': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) __annotations__ = { - "command": command, - "flavor": flavor, - "protocol": protocol, "repository": repository, "tag": tag, - "env": env, - "healthcheck_route": healthcheck_route, + "command": command, + "protocol": protocol, + "flavor": flavor, "predict_route": predict_route, + "healthcheck_route": healthcheck_route, + "env": env, "readiness_initial_delay_seconds": readiness_initial_delay_seconds, + "extra_routes": extra_routes, + "routes": routes, + 
"forwarder_type": forwarder_type, + "worker_command": worker_command, + "worker_env": worker_env, } - + flavor: MetaOapg.properties.flavor protocol: MetaOapg.properties.protocol tag: MetaOapg.properties.tag repository: MetaOapg.properties.repository command: MetaOapg.properties.command - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: - ... - + def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: - ... - + def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: - ... - + def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> MetaOapg.properties.healthcheck_route: - ... - + def __getitem__(self, name: typing_extensions.Literal["healthcheck_route"]) -> MetaOapg.properties.healthcheck_route: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: - ... - + def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.readiness_initial_delay_seconds: - ... - + def __getitem__(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> MetaOapg.properties.readiness_initial_delay_seconds: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["extra_routes"]) -> MetaOapg.properties.extra_routes: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["routes"]) -> MetaOapg.properties.routes: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["forwarder_type"]) -> MetaOapg.properties.forwarder_type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["worker_command"]) -> MetaOapg.properties.worker_command: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["worker_env"]) -> MetaOapg.properties.worker_env: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["predict_route"]) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["healthcheck_route"]) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["predict_route"] - ) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["extra_routes"]) -> typing.Union[MetaOapg.properties.extra_routes, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["routes"]) -> typing.Union[MetaOapg.properties.routes, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["forwarder_type"]) -> typing.Union[MetaOapg.properties.forwarder_type, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["worker_command"]) -> typing.Union[MetaOapg.properties.worker_command, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["worker_env"]) -> typing.Union[MetaOapg.properties.worker_env, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - protocol: typing.Union[ - MetaOapg.properties.protocol, - str, - ], - tag: typing.Union[ - MetaOapg.properties.tag, - str, - ], - repository: typing.Union[ - MetaOapg.properties.repository, - str, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, str, ], + protocol: typing.Union[MetaOapg.properties.protocol, str, ], + tag: typing.Union[MetaOapg.properties.tag, str, ], + repository: typing.Union[MetaOapg.properties.repository, str, ], + command: typing.Union[MetaOapg.properties.command, list, tuple, ], predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.readiness_initial_delay_seconds, 
decimal.Decimal, int, schemas.Unset - ] = schemas.unset, + healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, + env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, + extra_routes: typing.Union[MetaOapg.properties.extra_routes, list, tuple, schemas.Unset] = schemas.unset, + routes: typing.Union[MetaOapg.properties.routes, list, tuple, schemas.Unset] = schemas.unset, + forwarder_type: typing.Union[MetaOapg.properties.forwarder_type, None, str, schemas.Unset] = schemas.unset, + worker_command: typing.Union[MetaOapg.properties.worker_command, list, tuple, None, schemas.Unset] = schemas.unset, + worker_env: typing.Union[MetaOapg.properties.worker_env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "RunnableImageFlavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'RunnableImageFlavor': return super().__new__( cls, *_args, @@ -343,10 +411,15 @@ def __new__( tag=tag, repository=repository, command=command, - env=env, - healthcheck_route=healthcheck_route, predict_route=predict_route, + healthcheck_route=healthcheck_route, + env=env, readiness_initial_delay_seconds=readiness_initial_delay_seconds, + extra_routes=extra_routes, + routes=routes, + forwarder_type=forwarder_type, + worker_command=worker_command, + worker_env=worker_env, _configuration=_configuration, **kwargs, ) diff --git 
a/launch/api_client/model/runnable_image_flavor.pyi b/launch/api_client/model/runnable_image_flavor.pyi deleted file mode 100644 index a441c80a..00000000 --- a/launch/api_client/model/runnable_image_flavor.pyi +++ /dev/null @@ -1,290 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class RunnableImageFlavor(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle flavor of a runnable image. 
- """ - - class MetaOapg: - required = { - "flavor", - "protocol", - "tag", - "repository", - "command", - } - - class properties: - class command(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class flavor(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def RUNNABLE_IMAGE(cls): - return cls("runnable_image") - - class protocol(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def HTTP(cls): - return cls("http") - repository = schemas.StrSchema - tag = schemas.StrSchema - - class env(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - healthcheck_route = schemas.StrSchema - predict_route = schemas.StrSchema - readiness_initial_delay_seconds = schemas.IntSchema - __annotations__ = { - "command": command, - "flavor": flavor, - "protocol": protocol, - "repository": repository, - "tag": tag, - "env": env, - "healthcheck_route": healthcheck_route, - 
"predict_route": predict_route, - "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - } - flavor: MetaOapg.properties.flavor - protocol: MetaOapg.properties.protocol - tag: MetaOapg.properties.tag - repository: MetaOapg.properties.repository - command: MetaOapg.properties.command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> MetaOapg.properties.healthcheck_route: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.readiness_initial_delay_seconds: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["predict_route"] - ) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - protocol: typing.Union[ - MetaOapg.properties.protocol, - str, - ], - tag: typing.Union[ - MetaOapg.properties.tag, - str, - ], - repository: typing.Union[ - MetaOapg.properties.repository, - str, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, - predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "RunnableImageFlavor": - return super().__new__( - cls, - *_args, - flavor=flavor, - protocol=protocol, - tag=tag, - repository=repository, - command=command, - env=env, - healthcheck_route=healthcheck_route, - predict_route=predict_route, - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/service_tier.py b/launch/api_client/model/service_tier.py new file mode 100644 index 00000000..dc462fc7 --- /dev/null +++ 
b/launch/api_client/model/service_tier.py @@ -0,0 +1,80 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class ServiceTier( + schemas.EnumBase, + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service: + - If set to 'auto', and the Project is Scale tier enabled, the system + will utilize scale tier credits until they are exhausted. + - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. + - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing). + - When not set, the default behavior is 'auto'. + + When this parameter is set, the response body will include the `service_tier` utilized. 
+ + """ + + + class MetaOapg: + enum_value_to_name = { + "auto": "AUTO", + "default": "DEFAULT", + "flex": "FLEX", + } + + @schemas.classproperty + def AUTO(cls): + return cls("auto") + + @schemas.classproperty + def DEFAULT(cls): + return cls("default") + + @schemas.classproperty + def FLEX(cls): + return cls("flex") + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ServiceTier': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) diff --git a/launch/api_client/model/stop_configuration.py b/launch/api_client/model/stop_configuration.py new file mode 100644 index 00000000..d69582b5 --- /dev/null +++ b/launch/api_client/model/stop_configuration.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class StopConfiguration( + schemas.ComposedSchema, +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Not supported with latest reasoning models `o3` and `o4-mini`. + +Up to 4 sequences where the API will stop generating further tokens. The +returned text will not contain the stop sequence. 
+ + """ + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + StopConfiguration1, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'StopConfiguration': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.stop_configuration1 import StopConfiguration1 diff --git a/launch/api_client/model/stop_configuration1.py b/launch/api_client/model/stop_configuration1.py new file mode 100644 index 00000000..fb3241c0 --- /dev/null +++ b/launch/api_client/model/stop_configuration1.py @@ -0,0 +1,60 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from 
launch.api_client import schemas # noqa: F401 + + +class StopConfiguration1( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Not supported with latest reasoning models `o3` and `o4-mini`. + +Up to 4 sequences where the API will stop generating further tokens. The +returned text will not contain the stop sequence. + + """ + + + class MetaOapg: + items = schemas.StrSchema + max_items = 4 + min_items = 1 + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'StopConfiguration1': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) diff --git a/launch/api_client/model/stream_error.py b/launch/api_client/model/stream_error.py index 78dfabab..9e2c9c5e 100644 --- a/launch/api_client/model/stream_error.py +++ b/launch/api_client/model/stream_error.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class StreamError(schemas.DictSchema): +class StreamError( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,105 +34,62 @@ class StreamError(schemas.DictSchema): Error object for a stream prompt completion task. 
""" + class MetaOapg: required = { "status_code", "content", } - + class properties: + status_code = schemas.IntSchema + @staticmethod - def content() -> typing.Type["StreamErrorContent"]: + def content() -> typing.Type['StreamErrorContent']: return StreamErrorContent - - status_code = schemas.IntSchema __annotations__ = { - "content": content, "status_code": status_code, + "content": content, } - + status_code: MetaOapg.properties.status_code - content: "StreamErrorContent" - + content: 'StreamErrorContent' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> "StreamErrorContent": - ... - + def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: - ... - + def __getitem__(self, name: typing_extensions.Literal["content"]) -> 'StreamErrorContent': ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "status_code", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["status_code", "content", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> "StreamErrorContent": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> 'StreamErrorContent': ... 
+ @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "status_code", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status_code", "content", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - status_code: typing.Union[ - MetaOapg.properties.status_code, - decimal.Decimal, - int, - ], - content: "StreamErrorContent", + *_args: typing.Union[dict, frozendict.frozendict, ], + status_code: typing.Union[MetaOapg.properties.status_code, decimal.Decimal, int, ], + content: 'StreamErrorContent', _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "StreamError": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'StreamError': return super().__new__( cls, *_args, @@ -140,5 +99,4 @@ def __new__( **kwargs, ) - from launch.api_client.model.stream_error_content import StreamErrorContent diff --git a/launch/api_client/model/stream_error.pyi b/launch/api_client/model/stream_error.pyi deleted file mode 100644 index f57240bd..00000000 --- a/launch/api_client/model/stream_error.pyi +++ /dev/null @@ -1,125 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - 
-import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class StreamError(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Error object for a stream prompt completion task. - """ - - class MetaOapg: - required = { - "status_code", - "content", - } - - class properties: - @staticmethod - def content() -> typing.Type["StreamErrorContent"]: - return StreamErrorContent - status_code = schemas.IntSchema - __annotations__ = { - "content": content, - "status_code": status_code, - } - status_code: MetaOapg.properties.status_code - content: "StreamErrorContent" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["content"]) -> "StreamErrorContent": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "status_code", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["content"]) -> "StreamErrorContent": ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "content", - "status_code", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - status_code: typing.Union[ - MetaOapg.properties.status_code, - decimal.Decimal, - int, - ], - content: "StreamErrorContent", - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "StreamError": - return super().__new__( - cls, - *_args, - status_code=status_code, - content=content, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.stream_error_content import StreamErrorContent diff --git a/launch/api_client/model/stream_error_content.py b/launch/api_client/model/stream_error_content.py index 923f1082..41b7e854 100644 --- a/launch/api_client/model/stream_error_content.py +++ b/launch/api_client/model/stream_error_content.py @@ -23,19 +23,22 @@ from launch.api_client import schemas # noqa: F401 -class StreamErrorContent(schemas.DictSchema): +class StreamErrorContent( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "error", "timestamp", } - + class properties: error = schemas.StrSchema timestamp = schemas.StrSchema @@ -43,91 +46,45 @@ class properties: "error": error, "timestamp": timestamp, } - + error: MetaOapg.properties.error timestamp: MetaOapg.properties.timestamp - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: - ... - + def __getitem__(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: - ... - + def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "timestamp", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["error", "timestamp", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "timestamp", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["error", "timestamp", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - error: typing.Union[ - MetaOapg.properties.error, - str, - ], - timestamp: typing.Union[ - MetaOapg.properties.timestamp, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + error: typing.Union[MetaOapg.properties.error, str, ], + timestamp: typing.Union[MetaOapg.properties.timestamp, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "StreamErrorContent": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'StreamErrorContent': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/stream_error_content.pyi b/launch/api_client/model/stream_error_content.pyi deleted file mode 100644 index c42e5c50..00000000 --- a/launch/api_client/model/stream_error_content.pyi +++ /dev/null @@ -1,121 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class StreamErrorContent(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI 
Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "error", - "timestamp", - } - - class properties: - error = schemas.StrSchema - timestamp = schemas.StrSchema - __annotations__ = { - "error": error, - "timestamp": timestamp, - } - error: MetaOapg.properties.error - timestamp: MetaOapg.properties.timestamp - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "timestamp", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["error"]) -> MetaOapg.properties.error: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["timestamp"]) -> MetaOapg.properties.timestamp: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "error", - "timestamp", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - error: typing.Union[ - MetaOapg.properties.error, - str, - ], - timestamp: typing.Union[ - MetaOapg.properties.timestamp, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "StreamErrorContent": - return super().__new__( - cls, - *_args, - error=error, - timestamp=timestamp, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py b/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py index 70f33e25..1849bea3 100644 --- a/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py +++ b/launch/api_client/model/streaming_enhanced_runnable_image_flavor.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class StreamingEnhancedRunnableImageFlavor(schemas.DictSchema): +class StreamingEnhancedRunnableImageFlavor( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class StreamingEnhancedRunnableImageFlavor(schemas.DictSchema): For deployments that expose a streaming route in a container. 
""" + class MetaOapg: required = { "flavor", @@ -40,370 +43,406 @@ class MetaOapg: "repository", "streaming_command", } - + class properties: - class flavor(schemas.EnumBase, schemas.StrSchema): + repository = schemas.StrSchema + tag = schemas.StrSchema + + + class protocol( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { - "streaming_enhanced_runnable_image": "STREAMING_ENHANCED_RUNNABLE_IMAGE", + "http": "HTTP", } - + @schemas.classproperty - def STREAMING_ENHANCED_RUNNABLE_IMAGE(cls): - return cls("streaming_enhanced_runnable_image") - - class protocol(schemas.EnumBase, schemas.StrSchema): + def HTTP(cls): + return cls("http") + + + class flavor( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { - "http": "HTTP", + "streaming_enhanced_runnable_image": "STREAMING_ENHANCED_RUNNABLE_IMAGE", } - + @schemas.classproperty - def HTTP(cls): - return cls("http") - - repository = schemas.StrSchema - - class streaming_command(schemas.ListSchema): + def STREAMING_ENHANCED_RUNNABLE_IMAGE(cls): + return cls("streaming_enhanced_runnable_image") + + + class streaming_command( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "streaming_command": + ) -> 'streaming_command': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - tag = schemas.StrSchema - - class command(schemas.ListSchema): + + + class command( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: 
typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": + ) -> 'command': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - class env(schemas.DictSchema): + predict_route = schemas.StrSchema + healthcheck_route = schemas.StrSchema + + + class env( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'env': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - healthcheck_route = schemas.StrSchema - predict_route = schemas.StrSchema readiness_initial_delay_seconds = schemas.IntSchema + + + class extra_routes( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: 
typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'extra_routes': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + + class routes( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'routes': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + + class forwarder_type( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'forwarder_type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class worker_command( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'worker_command': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class worker_env( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, 
]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'worker_env': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) streaming_predict_route = schemas.StrSchema __annotations__ = { - "flavor": flavor, - "protocol": protocol, "repository": repository, - "streaming_command": streaming_command, "tag": tag, + "protocol": protocol, + "flavor": flavor, + "streaming_command": streaming_command, "command": command, - "env": env, - "healthcheck_route": healthcheck_route, "predict_route": predict_route, + "healthcheck_route": healthcheck_route, + "env": env, "readiness_initial_delay_seconds": readiness_initial_delay_seconds, + "extra_routes": extra_routes, + "routes": routes, + "forwarder_type": forwarder_type, + "worker_command": worker_command, + "worker_env": worker_env, "streaming_predict_route": streaming_predict_route, } - + flavor: MetaOapg.properties.flavor protocol: MetaOapg.properties.protocol tag: MetaOapg.properties.tag repository: MetaOapg.properties.repository streaming_command: MetaOapg.properties.streaming_command - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: - ... - + def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["streaming_command"] - ) -> MetaOapg.properties.streaming_command: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["streaming_command"]) -> MetaOapg.properties.streaming_command: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: - ... - + def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> MetaOapg.properties.healthcheck_route: - ... - + def __getitem__(self, name: typing_extensions.Literal["healthcheck_route"]) -> MetaOapg.properties.healthcheck_route: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: - ... - + def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.readiness_initial_delay_seconds: - ... - + def __getitem__(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> MetaOapg.properties.readiness_initial_delay_seconds: ... 
+ @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["streaming_predict_route"] - ) -> MetaOapg.properties.streaming_predict_route: - ... - + def __getitem__(self, name: typing_extensions.Literal["extra_routes"]) -> MetaOapg.properties.extra_routes: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "protocol", - "repository", - "streaming_command", - "tag", - "command", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "streaming_predict_route", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["routes"]) -> MetaOapg.properties.routes: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["forwarder_type"]) -> MetaOapg.properties.forwarder_type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["worker_command"]) -> MetaOapg.properties.worker_command: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["worker_env"]) -> MetaOapg.properties.worker_env: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["streaming_predict_route"]) -> MetaOapg.properties.streaming_predict_route: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "protocol", "flavor", "streaming_command", "command", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "streaming_predict_route", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["streaming_command"] - ) -> MetaOapg.properties.streaming_command: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["streaming_command"]) -> MetaOapg.properties.streaming_command: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["command"] - ) -> typing.Union[MetaOapg.properties.command, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> typing.Union[MetaOapg.properties.command, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["predict_route"]) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["healthcheck_route"]) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["predict_route"] - ) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["streaming_predict_route"] - ) -> typing.Union[MetaOapg.properties.streaming_predict_route, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["extra_routes"]) -> typing.Union[MetaOapg.properties.extra_routes, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "protocol", - "repository", - "streaming_command", - "tag", - "command", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "streaming_predict_route", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["routes"]) -> typing.Union[MetaOapg.properties.routes, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["forwarder_type"]) -> typing.Union[MetaOapg.properties.forwarder_type, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["worker_command"]) -> typing.Union[MetaOapg.properties.worker_command, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["worker_env"]) -> typing.Union[MetaOapg.properties.worker_env, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["streaming_predict_route"]) -> typing.Union[MetaOapg.properties.streaming_predict_route, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "protocol", "flavor", "streaming_command", "command", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "streaming_predict_route", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - protocol: typing.Union[ - MetaOapg.properties.protocol, - str, - ], - tag: typing.Union[ - MetaOapg.properties.tag, - str, - ], - repository: typing.Union[ - MetaOapg.properties.repository, - str, - ], - streaming_command: typing.Union[ - MetaOapg.properties.streaming_command, - list, - tuple, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, str, ], + protocol: typing.Union[MetaOapg.properties.protocol, str, ], + tag: typing.Union[MetaOapg.properties.tag, str, ], + repository: typing.Union[MetaOapg.properties.repository, str, ], + streaming_command: typing.Union[MetaOapg.properties.streaming_command, list, tuple, ], command: typing.Union[MetaOapg.properties.command, list, tuple, schemas.Unset] = schemas.unset, - env: typing.Union[MetaOapg.properties.env, 
dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - streaming_predict_route: typing.Union[ - MetaOapg.properties.streaming_predict_route, str, schemas.Unset - ] = schemas.unset, + healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, + env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, + extra_routes: typing.Union[MetaOapg.properties.extra_routes, list, tuple, schemas.Unset] = schemas.unset, + routes: typing.Union[MetaOapg.properties.routes, list, tuple, schemas.Unset] = schemas.unset, + forwarder_type: typing.Union[MetaOapg.properties.forwarder_type, None, str, schemas.Unset] = schemas.unset, + worker_command: typing.Union[MetaOapg.properties.worker_command, list, tuple, None, schemas.Unset] = schemas.unset, + worker_env: typing.Union[MetaOapg.properties.worker_env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + streaming_predict_route: typing.Union[MetaOapg.properties.streaming_predict_route, str, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "StreamingEnhancedRunnableImageFlavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'StreamingEnhancedRunnableImageFlavor': return super().__new__( cls, *_args, @@ -413,10 +452,15 @@ def __new__( repository=repository, streaming_command=streaming_command, command=command, - env=env, - healthcheck_route=healthcheck_route, predict_route=predict_route, + healthcheck_route=healthcheck_route, + env=env, readiness_initial_delay_seconds=readiness_initial_delay_seconds, + extra_routes=extra_routes, + routes=routes, + forwarder_type=forwarder_type, + worker_command=worker_command, + worker_env=worker_env, streaming_predict_route=streaming_predict_route, _configuration=_configuration, **kwargs, diff --git a/launch/api_client/model/streaming_enhanced_runnable_image_flavor.pyi b/launch/api_client/model/streaming_enhanced_runnable_image_flavor.pyi deleted file mode 100644 index 73d042e8..00000000 --- a/launch/api_client/model/streaming_enhanced_runnable_image_flavor.pyi +++ /dev/null @@ -1,350 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class StreamingEnhancedRunnableImageFlavor(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - For deployments that expose a streaming route in a container. 
- """ - - class MetaOapg: - required = { - "flavor", - "protocol", - "tag", - "repository", - "streaming_command", - } - - class properties: - class flavor(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def STREAMING_ENHANCED_RUNNABLE_IMAGE(cls): - return cls("streaming_enhanced_runnable_image") - - class protocol(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def HTTP(cls): - return cls("http") - repository = schemas.StrSchema - - class streaming_command(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "streaming_command": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - tag = schemas.StrSchema - - class command(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class env(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - 
frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - healthcheck_route = schemas.StrSchema - predict_route = schemas.StrSchema - readiness_initial_delay_seconds = schemas.IntSchema - streaming_predict_route = schemas.StrSchema - __annotations__ = { - "flavor": flavor, - "protocol": protocol, - "repository": repository, - "streaming_command": streaming_command, - "tag": tag, - "command": command, - "env": env, - "healthcheck_route": healthcheck_route, - "predict_route": predict_route, - "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - "streaming_predict_route": streaming_predict_route, - } - flavor: MetaOapg.properties.flavor - protocol: MetaOapg.properties.protocol - tag: MetaOapg.properties.tag - repository: MetaOapg.properties.repository - streaming_command: MetaOapg.properties.streaming_command - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["streaming_command"] - ) -> MetaOapg.properties.streaming_command: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... 
- @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> MetaOapg.properties.healthcheck_route: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.readiness_initial_delay_seconds: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["streaming_predict_route"] - ) -> MetaOapg.properties.streaming_predict_route: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "protocol", - "repository", - "streaming_command", - "tag", - "command", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "streaming_predict_route", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["streaming_command"] - ) -> MetaOapg.properties.streaming_command: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["command"] - ) -> typing.Union[MetaOapg.properties.command, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["predict_route"] - ) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["streaming_predict_route"] - ) -> typing.Union[MetaOapg.properties.streaming_predict_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "protocol", - "repository", - "streaming_command", - "tag", - "command", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "streaming_predict_route", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - protocol: typing.Union[ - MetaOapg.properties.protocol, - str, - ], - tag: typing.Union[ - MetaOapg.properties.tag, - str, - ], - repository: typing.Union[ - MetaOapg.properties.repository, - str, - ], - streaming_command: typing.Union[ - MetaOapg.properties.streaming_command, - list, - tuple, - ], - command: typing.Union[MetaOapg.properties.command, list, tuple, schemas.Unset] = schemas.unset, - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, - predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - streaming_predict_route: typing.Union[ - MetaOapg.properties.streaming_predict_route, str, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "StreamingEnhancedRunnableImageFlavor": - return super().__new__( - cls, - *_args, - flavor=flavor, - protocol=protocol, - tag=tag, - repository=repository, - streaming_command=streaming_command, - command=command, - env=env, - 
healthcheck_route=healthcheck_route, - predict_route=predict_route, - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - streaming_predict_route=streaming_predict_route, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/sync_endpoint_predict_v1_request.py b/launch/api_client/model/sync_endpoint_predict_v1_request.py index fa779151..dd5f3cb8 100644 --- a/launch/api_client/model/sync_endpoint_predict_v1_request.py +++ b/launch/api_client/model/sync_endpoint_predict_v1_request.py @@ -23,234 +23,259 @@ from launch.api_client import schemas # noqa: F401 -class SyncEndpointPredictV1Request(schemas.DictSchema): +class SyncEndpointPredictV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: + class properties: + + + class url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) args = schemas.AnyTypeSchema - + + + class cloudpickle( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cloudpickle': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + @staticmethod - def callback_auth() -> 
typing.Type["CallbackAuth"]: + def callback_auth() -> typing.Type['CallbackAuth']: return CallbackAuth - - callback_url = schemas.StrSchema - cloudpickle = schemas.StrSchema - - class num_retries(schemas.IntSchema): + return_pickled = schemas.BoolSchema + + + class destination_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'destination_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class timeout_seconds( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'timeout_seconds': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_retries( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_minimum = 0 - - return_pickled = schemas.BoolSchema - timeout_seconds = schemas.NumberSchema - url = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_retries': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { + "url": url, "args": args, - "callback_auth": callback_auth, - "callback_url": callback_url, "cloudpickle": cloudpickle, - "num_retries": num_retries, + "callback_url": callback_url, + "callback_auth": callback_auth, "return_pickled": return_pickled, + "destination_path": destination_path, "timeout_seconds": timeout_seconds, - "url": url, + "num_retries": num_retries, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["args"]) -> 
MetaOapg.properties.args: - ... - + def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> "CallbackAuth": - ... - + def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: - ... - + def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: - ... - + def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_retries"]) -> MetaOapg.properties.num_retries: - ... - + def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> 'CallbackAuth': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: - ... - + def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["timeout_seconds"]) -> MetaOapg.properties.timeout_seconds: - ... - + def __getitem__(self, name: typing_extensions.Literal["destination_path"]) -> MetaOapg.properties.destination_path: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: - ... - + def __getitem__(self, name: typing_extensions.Literal["timeout_seconds"]) -> MetaOapg.properties.timeout_seconds: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "num_retries", - "return_pickled", - "timeout_seconds", - "url", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["num_retries"]) -> MetaOapg.properties.num_retries: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", "timeout_seconds", "num_retries", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["args"] - ) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["args"]) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_url"] - ) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cloudpickle"]) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cloudpickle"] - ) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["callback_url"]) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_retries"] - ) -> typing.Union[MetaOapg.properties.num_retries, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_pickled"] - ) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["return_pickled"]) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["timeout_seconds"] - ) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["destination_path"]) -> typing.Union[MetaOapg.properties.destination_path, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["url"] - ) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["timeout_seconds"]) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "num_retries", - "return_pickled", - "timeout_seconds", - "url", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["num_retries"]) -> typing.Union[MetaOapg.properties.num_retries, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["url", "args", "cloudpickle", "callback_url", "callback_auth", "return_pickled", "destination_path", "timeout_seconds", "num_retries", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - args: typing.Union[ - MetaOapg.properties.args, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - callback_url: typing.Union[MetaOapg.properties.callback_url, str, schemas.Unset] = schemas.unset, - cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, str, schemas.Unset] = schemas.unset, - num_retries: typing.Union[MetaOapg.properties.num_retries, decimal.Decimal, int, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + url: typing.Union[MetaOapg.properties.url, None, str, schemas.Unset] = schemas.unset, + args: typing.Union[MetaOapg.properties.args, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, None, str, schemas.Unset] = schemas.unset, + callback_url: typing.Union[MetaOapg.properties.callback_url, None, str, schemas.Unset] = schemas.unset, + callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, return_pickled: typing.Union[MetaOapg.properties.return_pickled, bool, schemas.Unset] = schemas.unset, - timeout_seconds: typing.Union[ - MetaOapg.properties.timeout_seconds, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - url: typing.Union[MetaOapg.properties.url, str, schemas.Unset] = schemas.unset, + 
destination_path: typing.Union[MetaOapg.properties.destination_path, None, str, schemas.Unset] = schemas.unset, + timeout_seconds: typing.Union[MetaOapg.properties.timeout_seconds, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + num_retries: typing.Union[MetaOapg.properties.num_retries, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "SyncEndpointPredictV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'SyncEndpointPredictV1Request': return super().__new__( cls, *_args, + url=url, args=args, - callback_auth=callback_auth, - callback_url=callback_url, cloudpickle=cloudpickle, - num_retries=num_retries, + callback_url=callback_url, + callback_auth=callback_auth, return_pickled=return_pickled, + destination_path=destination_path, timeout_seconds=timeout_seconds, - url=url, + num_retries=num_retries, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.callback_auth import CallbackAuth diff --git a/launch/api_client/model/sync_endpoint_predict_v1_request.pyi b/launch/api_client/model/sync_endpoint_predict_v1_request.pyi deleted file mode 100644 index 7249cd99..00000000 --- a/launch/api_client/model/sync_endpoint_predict_v1_request.pyi +++ /dev/null @@ -1,213 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: 
F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class SyncEndpointPredictV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - args = schemas.AnyTypeSchema - - @staticmethod - def callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - callback_url = schemas.StrSchema - cloudpickle = schemas.StrSchema - - class num_retries(schemas.IntSchema): - pass - return_pickled = schemas.BoolSchema - timeout_seconds = schemas.NumberSchema - url = schemas.StrSchema - __annotations__ = { - "args": args, - "callback_auth": callback_auth, - "callback_url": callback_url, - "cloudpickle": cloudpickle, - "num_retries": num_retries, - "return_pickled": return_pickled, - "timeout_seconds": timeout_seconds, - "url": url, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["args"]) -> MetaOapg.properties.args: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_auth"]) -> "CallbackAuth": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["callback_url"]) -> MetaOapg.properties.callback_url: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cloudpickle"]) -> MetaOapg.properties.cloudpickle: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_retries"]) -> MetaOapg.properties.num_retries: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["return_pickled"]) -> MetaOapg.properties.return_pickled: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["timeout_seconds"] - ) -> MetaOapg.properties.timeout_seconds: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "num_retries", - "return_pickled", - "timeout_seconds", - "url", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["args"] - ) -> typing.Union[MetaOapg.properties.args, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["callback_url"] - ) -> typing.Union[MetaOapg.properties.callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cloudpickle"] - ) -> typing.Union[MetaOapg.properties.cloudpickle, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_retries"] - ) -> typing.Union[MetaOapg.properties.num_retries, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["return_pickled"] - ) -> typing.Union[MetaOapg.properties.return_pickled, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["timeout_seconds"] - ) -> typing.Union[MetaOapg.properties.timeout_seconds, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["url"] - ) -> typing.Union[MetaOapg.properties.url, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "args", - "callback_auth", - "callback_url", - "cloudpickle", - "num_retries", - "return_pickled", - "timeout_seconds", - "url", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - args: typing.Union[ - MetaOapg.properties.args, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - callback_url: typing.Union[MetaOapg.properties.callback_url, str, schemas.Unset] = schemas.unset, - cloudpickle: typing.Union[MetaOapg.properties.cloudpickle, str, schemas.Unset] = schemas.unset, - num_retries: typing.Union[MetaOapg.properties.num_retries, decimal.Decimal, int, schemas.Unset] = schemas.unset, - return_pickled: typing.Union[MetaOapg.properties.return_pickled, bool, schemas.Unset] = schemas.unset, - timeout_seconds: typing.Union[ - MetaOapg.properties.timeout_seconds, decimal.Decimal, int, float, schemas.Unset - ] = schemas.unset, - url: typing.Union[MetaOapg.properties.url, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "SyncEndpointPredictV1Request": - return super().__new__( - cls, - *_args, - args=args, - callback_auth=callback_auth, - callback_url=callback_url, - cloudpickle=cloudpickle, - num_retries=num_retries, - return_pickled=return_pickled, - timeout_seconds=timeout_seconds, - url=url, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.callback_auth import 
CallbackAuth diff --git a/launch/api_client/model/sync_endpoint_predict_v1_response.py b/launch/api_client/model/sync_endpoint_predict_v1_response.py index e3753b5e..94c526fc 100644 --- a/launch/api_client/model/sync_endpoint_predict_v1_response.py +++ b/launch/api_client/model/sync_endpoint_predict_v1_response.py @@ -23,151 +23,134 @@ from launch.api_client import schemas # noqa: F401 -class SyncEndpointPredictV1Response(schemas.DictSchema): +class SyncEndpointPredictV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "status", } - + class properties: + @staticmethod - def status() -> typing.Type["TaskStatus"]: + def status() -> typing.Type['TaskStatus']: return TaskStatus - result = schemas.AnyTypeSchema - traceback = schemas.StrSchema + + + class traceback( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'traceback': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class status_code( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'status_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "status": status, "result": result, "traceback": traceback, + "status_code": status_code, } - - status: "TaskStatus" - + + status: 'TaskStatus' + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": - ... - + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: - ... - + def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: - ... - + def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "result", - "traceback", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["status_code"]) -> MetaOapg.properties.status_code: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["status", "result", "traceback", "status_code", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": - ... - + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'TaskStatus': ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["result"] - ) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["result"]) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["traceback"] - ) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["traceback"]) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "result", - "traceback", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["status_code"]) -> typing.Union[MetaOapg.properties.status_code, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["status", "result", "traceback", "status_code", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - status: "TaskStatus", - result: typing.Union[ - MetaOapg.properties.result, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - traceback: typing.Union[MetaOapg.properties.traceback, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + status: 'TaskStatus', + result: typing.Union[MetaOapg.properties.result, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + traceback: typing.Union[MetaOapg.properties.traceback, None, str, schemas.Unset] = schemas.unset, + status_code: typing.Union[MetaOapg.properties.status_code, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> 
"SyncEndpointPredictV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'SyncEndpointPredictV1Response': return super().__new__( cls, *_args, status=status, result=result, traceback=traceback, + status_code=status_code, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.task_status import TaskStatus diff --git a/launch/api_client/model/sync_endpoint_predict_v1_response.pyi b/launch/api_client/model/sync_endpoint_predict_v1_response.pyi deleted file mode 100644 index 49ae96fa..00000000 --- a/launch/api_client/model/sync_endpoint_predict_v1_response.pyi +++ /dev/null @@ -1,150 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class SyncEndpointPredictV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "status", - } - - class properties: - @staticmethod - def status() -> typing.Type["TaskStatus"]: - return TaskStatus - result = schemas.AnyTypeSchema - traceback = schemas.StrSchema - __annotations__ = { - "status": status, - "result": result, - "traceback": traceback, - } - status: "TaskStatus" - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["result"]) -> MetaOapg.properties.result: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["traceback"]) -> MetaOapg.properties.traceback: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "result", - "traceback", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> "TaskStatus": ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["result"] - ) -> typing.Union[MetaOapg.properties.result, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["traceback"] - ) -> typing.Union[MetaOapg.properties.traceback, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "status", - "result", - "traceback", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - status: "TaskStatus", - result: typing.Union[ - MetaOapg.properties.result, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - traceback: typing.Union[MetaOapg.properties.traceback, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "SyncEndpointPredictV1Response": - return super().__new__( - cls, - *_args, - status=status, - result=result, - traceback=traceback, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.task_status import TaskStatus diff --git a/launch/api_client/model/task_status.py b/launch/api_client/model/task_status.py index 9693fad1..f4a30cae 100644 --- a/launch/api_client/model/task_status.py +++ b/launch/api_client/model/task_status.py @@ -23,15 +23,17 @@ from launch.api_client import schemas # noqa: F401 -class TaskStatus(schemas.EnumBase, schemas.StrSchema): +class TaskStatus( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - - An enumeration. 
""" + class MetaOapg: enum_value_to_name = { "PENDING": "PENDING", @@ -40,23 +42,23 @@ class MetaOapg: "FAILURE": "FAILURE", "UNDEFINED": "UNDEFINED", } - + @schemas.classproperty def PENDING(cls): return cls("PENDING") - + @schemas.classproperty def STARTED(cls): return cls("STARTED") - + @schemas.classproperty def SUCCESS(cls): return cls("SUCCESS") - + @schemas.classproperty def FAILURE(cls): return cls("FAILURE") - + @schemas.classproperty def UNDEFINED(cls): return cls("UNDEFINED") diff --git a/launch/api_client/model/task_status.pyi b/launch/api_client/model/task_status.pyi deleted file mode 100644 index d81b166f..00000000 --- a/launch/api_client/model/task_status.pyi +++ /dev/null @@ -1,47 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class TaskStatus(schemas.EnumBase, schemas.StrSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - An enumeration. 
- """ - - @schemas.classproperty - def PENDING(cls): - return cls("PENDING") - @schemas.classproperty - def STARTED(cls): - return cls("STARTED") - @schemas.classproperty - def SUCCESS(cls): - return cls("SUCCESS") - @schemas.classproperty - def FAILURE(cls): - return cls("FAILURE") - @schemas.classproperty - def UNDEFINED(cls): - return cls("UNDEFINED") diff --git a/launch/api_client/model/tensorflow_framework.py b/launch/api_client/model/tensorflow_framework.py index f7a0d24a..433503f7 100644 --- a/launch/api_client/model/tensorflow_framework.py +++ b/launch/api_client/model/tensorflow_framework.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class TensorflowFramework(schemas.DictSchema): +class TensorflowFramework( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,117 +34,74 @@ class TensorflowFramework(schemas.DictSchema): This is the entity-layer class for a Tensorflow framework specification. """ + class MetaOapg: required = { "tensorflow_version", "framework_type", } - + class properties: - class framework_type(schemas.EnumBase, schemas.StrSchema): + + + class framework_type( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { "tensorflow": "TENSORFLOW", } - + @schemas.classproperty def TENSORFLOW(cls): return cls("tensorflow") - tensorflow_version = schemas.StrSchema __annotations__ = { "framework_type": framework_type, "tensorflow_version": tensorflow_version, } - + tensorflow_version: MetaOapg.properties.tensorflow_version framework_type: MetaOapg.properties.framework_type - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: - ... - + def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... 
+ @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> MetaOapg.properties.tensorflow_version: - ... - + def __getitem__(self, name: typing_extensions.Literal["tensorflow_version"]) -> MetaOapg.properties.tensorflow_version: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "tensorflow_version", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["framework_type", "tensorflow_version", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> MetaOapg.properties.tensorflow_version: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["tensorflow_version"]) -> MetaOapg.properties.tensorflow_version: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "tensorflow_version", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["framework_type", "tensorflow_version", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - tensorflow_version: typing.Union[ - MetaOapg.properties.tensorflow_version, - str, - ], - framework_type: typing.Union[ - MetaOapg.properties.framework_type, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + tensorflow_version: typing.Union[MetaOapg.properties.tensorflow_version, str, ], + framework_type: typing.Union[MetaOapg.properties.framework_type, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "TensorflowFramework": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'TensorflowFramework': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/tensorflow_framework.pyi b/launch/api_client/model/tensorflow_framework.pyi deleted file mode 100644 index 9cf421c8..00000000 --- a/launch/api_client/model/tensorflow_framework.pyi +++ /dev/null @@ -1,132 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class 
TensorflowFramework(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for a Tensorflow framework specification. - """ - - class MetaOapg: - required = { - "tensorflow_version", - "framework_type", - } - - class properties: - class framework_type(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def TENSORFLOW(cls): - return cls("tensorflow") - tensorflow_version = schemas.StrSchema - __annotations__ = { - "framework_type": framework_type, - "tensorflow_version": tensorflow_version, - } - tensorflow_version: MetaOapg.properties.tensorflow_version - framework_type: MetaOapg.properties.framework_type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework_type"]) -> MetaOapg.properties.framework_type: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> MetaOapg.properties.tensorflow_version: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "tensorflow_version", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["framework_type"] - ) -> MetaOapg.properties.framework_type: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["tensorflow_version"] - ) -> MetaOapg.properties.tensorflow_version: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "framework_type", - "tensorflow_version", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - tensorflow_version: typing.Union[ - MetaOapg.properties.tensorflow_version, - str, - ], - framework_type: typing.Union[ - MetaOapg.properties.framework_type, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "TensorflowFramework": - return super().__new__( - cls, - *_args, - tensorflow_version=tensorflow_version, - framework_type=framework_type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/token_output.py b/launch/api_client/model/token_output.py index 0ccc91c4..4c340d96 100644 --- a/launch/api_client/model/token_output.py +++ b/launch/api_client/model/token_output.py @@ -23,113 +23,70 @@ from launch.api_client import schemas # noqa: F401 -class TokenOutput(schemas.DictSchema): +class TokenOutput( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. + + Detailed token information. """ + class MetaOapg: required = { "log_prob", "token", } - + class properties: - log_prob = schemas.NumberSchema token = schemas.StrSchema + log_prob = schemas.NumberSchema __annotations__ = { - "log_prob": log_prob, "token": token, + "log_prob": log_prob, } - + log_prob: MetaOapg.properties.log_prob token: MetaOapg.properties.token - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: - ... - + def __getitem__(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "log_prob", - "token", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["token", "log_prob", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "log_prob", - "token", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["token", "log_prob", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - log_prob: typing.Union[ - MetaOapg.properties.log_prob, - decimal.Decimal, - int, - float, - ], - token: typing.Union[ - MetaOapg.properties.token, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + log_prob: typing.Union[MetaOapg.properties.log_prob, decimal.Decimal, int, float, ], + token: typing.Union[MetaOapg.properties.token, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "TokenOutput": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'TokenOutput': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/token_output.pyi b/launch/api_client/model/token_output.pyi deleted file mode 100644 index 90958c20..00000000 --- a/launch/api_client/model/token_output.pyi +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class TokenOutput(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI 
Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "log_prob", - "token", - } - - class properties: - log_prob = schemas.NumberSchema - token = schemas.StrSchema - __annotations__ = { - "log_prob": log_prob, - "token": token, - } - log_prob: MetaOapg.properties.log_prob - token: MetaOapg.properties.token - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "log_prob", - "token", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["log_prob"]) -> MetaOapg.properties.log_prob: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "log_prob", - "token", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - log_prob: typing.Union[ - MetaOapg.properties.log_prob, - decimal.Decimal, - int, - float, - ], - token: typing.Union[ - MetaOapg.properties.token, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "TokenOutput": - return super().__new__( - cls, - *_args, - log_prob=log_prob, - token=token, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/tool_config.py b/launch/api_client/model/tool_config.py index 5d1a633c..d3101fd9 100644 --- a/launch/api_client/model/tool_config.py +++ b/launch/api_client/model/tool_config.py @@ -23,157 +23,150 @@ from launch.api_client import schemas # noqa: F401 -class ToolConfig(schemas.DictSchema): +class ToolConfig( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech + Ref: https://openapi-generator.tech - Do not edit the class manually. + Do not edit the class manually. - Configuration for tool use. - NOTE: this config is highly experimental and signature will change significantly in future iterations. + Configuration for tool use. +NOTE: this config is highly experimental and signature will change significantly in future iterations. 
""" + class MetaOapg: required = { "name", } - + class properties: name = schemas.StrSchema - execution_timeout_seconds = schemas.IntSchema - max_iterations = schemas.IntSchema - should_retry_on_error = schemas.BoolSchema + + + class max_iterations( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_iterations': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class execution_timeout_seconds( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'execution_timeout_seconds': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class should_retry_on_error( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'should_retry_on_error': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "name": name, - "execution_timeout_seconds": execution_timeout_seconds, "max_iterations": max_iterations, + "execution_timeout_seconds": execution_timeout_seconds, "should_retry_on_error": should_retry_on_error, } - + name: MetaOapg.properties.name - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["execution_timeout_seconds"] - ) -> MetaOapg.properties.execution_timeout_seconds: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["max_iterations"]) -> MetaOapg.properties.max_iterations: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_iterations"]) -> MetaOapg.properties.max_iterations: - ... - + def __getitem__(self, name: typing_extensions.Literal["execution_timeout_seconds"]) -> MetaOapg.properties.execution_timeout_seconds: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["should_retry_on_error"] - ) -> MetaOapg.properties.should_retry_on_error: - ... - + def __getitem__(self, name: typing_extensions.Literal["should_retry_on_error"]) -> MetaOapg.properties.should_retry_on_error: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "name", - "execution_timeout_seconds", - "max_iterations", - "should_retry_on_error", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["name", "max_iterations", "execution_timeout_seconds", "should_retry_on_error", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["execution_timeout_seconds"] - ) -> typing.Union[MetaOapg.properties.execution_timeout_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_iterations"]) -> typing.Union[MetaOapg.properties.max_iterations, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_iterations"] - ) -> typing.Union[MetaOapg.properties.max_iterations, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["execution_timeout_seconds"]) -> typing.Union[MetaOapg.properties.execution_timeout_seconds, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["should_retry_on_error"] - ) -> typing.Union[MetaOapg.properties.should_retry_on_error, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["should_retry_on_error"]) -> typing.Union[MetaOapg.properties.should_retry_on_error, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "name", - "execution_timeout_seconds", - "max_iterations", - "should_retry_on_error", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["name", "max_iterations", "execution_timeout_seconds", "should_retry_on_error", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - execution_timeout_seconds: typing.Union[ - MetaOapg.properties.execution_timeout_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - max_iterations: typing.Union[ - MetaOapg.properties.max_iterations, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - should_retry_on_error: typing.Union[ - MetaOapg.properties.should_retry_on_error, bool, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + name: typing.Union[MetaOapg.properties.name, str, ], + max_iterations: typing.Union[MetaOapg.properties.max_iterations, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + execution_timeout_seconds: typing.Union[MetaOapg.properties.execution_timeout_seconds, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + should_retry_on_error: typing.Union[MetaOapg.properties.should_retry_on_error, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ToolConfig": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ToolConfig': return super().__new__( cls, *_args, name=name, - execution_timeout_seconds=execution_timeout_seconds, max_iterations=max_iterations, + execution_timeout_seconds=execution_timeout_seconds, should_retry_on_error=should_retry_on_error, _configuration=_configuration, **kwargs, 
diff --git a/launch/api_client/model/tool_config.pyi b/launch/api_client/model/tool_config.pyi deleted file mode 100644 index c981480a..00000000 --- a/launch/api_client/model/tool_config.pyi +++ /dev/null @@ -1,155 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ToolConfig(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Configuration for tool use. - NOTE: this config is highly experimental and signature will change significantly in future iterations. - """ - - class MetaOapg: - required = { - "name", - } - - class properties: - name = schemas.StrSchema - execution_timeout_seconds = schemas.IntSchema - max_iterations = schemas.IntSchema - should_retry_on_error = schemas.BoolSchema - __annotations__ = { - "name": name, - "execution_timeout_seconds": execution_timeout_seconds, - "max_iterations": max_iterations, - "should_retry_on_error": should_retry_on_error, - } - name: MetaOapg.properties.name - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["execution_timeout_seconds"] - ) -> MetaOapg.properties.execution_timeout_seconds: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_iterations"]) -> MetaOapg.properties.max_iterations: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["should_retry_on_error"] - ) -> MetaOapg.properties.should_retry_on_error: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "name", - "execution_timeout_seconds", - "max_iterations", - "should_retry_on_error", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["name"]) -> MetaOapg.properties.name: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["execution_timeout_seconds"] - ) -> typing.Union[MetaOapg.properties.execution_timeout_seconds, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_iterations"] - ) -> typing.Union[MetaOapg.properties.max_iterations, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["should_retry_on_error"] - ) -> typing.Union[MetaOapg.properties.should_retry_on_error, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "name", - "execution_timeout_seconds", - "max_iterations", - "should_retry_on_error", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - name: typing.Union[ - MetaOapg.properties.name, - str, - ], - execution_timeout_seconds: typing.Union[ - MetaOapg.properties.execution_timeout_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - max_iterations: typing.Union[ - MetaOapg.properties.max_iterations, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - should_retry_on_error: typing.Union[ - MetaOapg.properties.should_retry_on_error, bool, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ToolConfig": - return super().__new__( - cls, - *_args, - name=name, - execution_timeout_seconds=execution_timeout_seconds, - max_iterations=max_iterations, - should_retry_on_error=should_retry_on_error, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/top_logprob.py b/launch/api_client/model/top_logprob.py new file mode 100644 index 00000000..f53fc266 --- /dev/null +++ b/launch/api_client/model/top_logprob.py @@ -0,0 +1,130 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import 
frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class TopLogprob( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "logprob", + "bytes", + "token", + } + + class properties: + token = schemas.StrSchema + logprob = schemas.NumberSchema + + + class bytes( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'bytes': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "token": token, + "logprob": logprob, + "bytes": bytes, + } + + logprob: MetaOapg.properties.logprob + bytes: MetaOapg.properties.bytes + token: MetaOapg.properties.token + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["token"]) -> MetaOapg.properties.token: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["logprob"]) -> MetaOapg.properties.logprob: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["bytes"]) -> MetaOapg.properties.bytes: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["token", "logprob", "bytes", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + logprob: typing.Union[MetaOapg.properties.logprob, decimal.Decimal, int, float, ], + bytes: typing.Union[MetaOapg.properties.bytes, list, tuple, None, ], + token: typing.Union[MetaOapg.properties.token, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'TopLogprob': + return super().__new__( + cls, + *_args, + logprob=logprob, + bytes=bytes, + token=token, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/triton_enhanced_runnable_image_flavor.py b/launch/api_client/model/triton_enhanced_runnable_image_flavor.py index 6604dd89..d884017b 100644 --- a/launch/api_client/model/triton_enhanced_runnable_image_flavor.py +++ b/launch/api_client/model/triton_enhanced_runnable_image_flavor.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class TritonEnhancedRunnableImageFlavor(schemas.DictSchema): +class TritonEnhancedRunnableImageFlavor( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class TritonEnhancedRunnableImageFlavor(schemas.DictSchema): For deployments that require tritonserver running in a container. 
""" + class MetaOapg: required = { "flavor", @@ -43,163 +46,325 @@ class MetaOapg: "command", "triton_num_cpu", } - + class properties: - class command(schemas.ListSchema): + repository = schemas.StrSchema + tag = schemas.StrSchema + + + class command( + schemas.ListSchema + ): + + class MetaOapg: items = schemas.StrSchema - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": + ) -> 'command': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - - class flavor(schemas.EnumBase, schemas.StrSchema): + + + class protocol( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { - "triton_enhanced_runnable_image": "TRITON_ENHANCED_RUNNABLE_IMAGE", + "http": "HTTP", } - + @schemas.classproperty - def TRITON_ENHANCED_RUNNABLE_IMAGE(cls): - return cls("triton_enhanced_runnable_image") - - class protocol(schemas.EnumBase, schemas.StrSchema): + def HTTP(cls): + return cls("http") + + + class flavor( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: enum_value_to_name = { - "http": "HTTP", + "triton_enhanced_runnable_image": "TRITON_ENHANCED_RUNNABLE_IMAGE", } - + @schemas.classproperty - def HTTP(cls): - return cls("http") - - repository = schemas.StrSchema - tag = schemas.StrSchema - triton_commit_tag = schemas.StrSchema + def TRITON_ENHANCED_RUNNABLE_IMAGE(cls): + return cls("triton_enhanced_runnable_image") triton_model_repository = schemas.StrSchema triton_num_cpu = schemas.NumberSchema - - class env(schemas.DictSchema): + triton_commit_tag = schemas.StrSchema + predict_route = schemas.StrSchema + 
healthcheck_route = schemas.StrSchema + + + class env( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'env': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - healthcheck_route = schemas.StrSchema - predict_route = schemas.StrSchema readiness_initial_delay_seconds = schemas.IntSchema - triton_memory = schemas.StrSchema - - class triton_model_replicas(schemas.DictSchema): + + + class extra_routes( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'extra_routes': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + + class routes( + schemas.ListSchema + ): + + + class MetaOapg: + items = schemas.StrSchema + + def __new__( + cls, + _arg: 
typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'routes': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + + class forwarder_type( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'forwarder_type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class worker_command( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'worker_command': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class worker_env( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + class MetaOapg: additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: # dict_instance[name] accessor return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: return super().get_item_oapg(name) - + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ 
- MetaOapg.additional_properties, - str, - ], - ) -> "triton_model_replicas": + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'worker_env': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class triton_model_replicas( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'triton_model_replicas': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class triton_storage( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'triton_storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class triton_memory( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'triton_memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) triton_readiness_initial_delay_seconds = schemas.IntSchema - triton_storage = schemas.StrSchema __annotations__ = { - "command": command, - "flavor": flavor, - "protocol": protocol, "repository": repository, "tag": tag, - "triton_commit_tag": 
triton_commit_tag, + "command": command, + "protocol": protocol, + "flavor": flavor, "triton_model_repository": triton_model_repository, "triton_num_cpu": triton_num_cpu, - "env": env, - "healthcheck_route": healthcheck_route, + "triton_commit_tag": triton_commit_tag, "predict_route": predict_route, + "healthcheck_route": healthcheck_route, + "env": env, "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - "triton_memory": triton_memory, + "extra_routes": extra_routes, + "routes": routes, + "forwarder_type": forwarder_type, + "worker_command": worker_command, + "worker_env": worker_env, "triton_model_replicas": triton_model_replicas, - "triton_readiness_initial_delay_seconds": triton_readiness_initial_delay_seconds, "triton_storage": triton_storage, + "triton_memory": triton_memory, + "triton_readiness_initial_delay_seconds": triton_readiness_initial_delay_seconds, } - + flavor: MetaOapg.properties.flavor protocol: MetaOapg.properties.protocol tag: MetaOapg.properties.tag @@ -208,301 +373,175 @@ def __new__( triton_model_repository: MetaOapg.properties.triton_model_repository command: MetaOapg.properties.command triton_num_cpu: MetaOapg.properties.triton_num_cpu - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... - + def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: - ... - + def __getitem__(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: - ... - + def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_commit_tag"] - ) -> MetaOapg.properties.triton_commit_tag: - ... - + def __getitem__(self, name: typing_extensions.Literal["triton_model_repository"]) -> MetaOapg.properties.triton_model_repository: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_model_repository"] - ) -> MetaOapg.properties.triton_model_repository: - ... - + def __getitem__(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: - ... - + def __getitem__(self, name: typing_extensions.Literal["triton_commit_tag"]) -> MetaOapg.properties.triton_commit_tag: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: - ... - + def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> MetaOapg.properties.healthcheck_route: - ... - + def __getitem__(self, name: typing_extensions.Literal["healthcheck_route"]) -> MetaOapg.properties.healthcheck_route: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.readiness_initial_delay_seconds: - ... - + def __getitem__(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> MetaOapg.properties.readiness_initial_delay_seconds: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_memory"]) -> MetaOapg.properties.triton_memory: - ... - + def __getitem__(self, name: typing_extensions.Literal["extra_routes"]) -> MetaOapg.properties.extra_routes: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_model_replicas"] - ) -> MetaOapg.properties.triton_model_replicas: - ... - + def __getitem__(self, name: typing_extensions.Literal["routes"]) -> MetaOapg.properties.routes: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.triton_readiness_initial_delay_seconds: - ... - + def __getitem__(self, name: typing_extensions.Literal["forwarder_type"]) -> MetaOapg.properties.forwarder_type: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_storage"]) -> MetaOapg.properties.triton_storage: - ... - + def __getitem__(self, name: typing_extensions.Literal["worker_command"]) -> MetaOapg.properties.worker_command: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "triton_commit_tag", - "triton_model_repository", - "triton_num_cpu", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "triton_memory", - "triton_model_replicas", - "triton_readiness_initial_delay_seconds", - "triton_storage", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["worker_env"]) -> MetaOapg.properties.worker_env: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["triton_model_replicas"]) -> MetaOapg.properties.triton_model_replicas: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["triton_storage"]) -> MetaOapg.properties.triton_storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["triton_memory"]) -> MetaOapg.properties.triton_memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"]) -> MetaOapg.properties.triton_readiness_initial_delay_seconds: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "triton_model_repository", "triton_num_cpu", "triton_commit_tag", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "triton_model_replicas", "triton_storage", "triton_memory", "triton_readiness_initial_delay_seconds", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_commit_tag"] - ) -> MetaOapg.properties.triton_commit_tag: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["triton_model_repository"]) -> MetaOapg.properties.triton_model_repository: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_model_repository"] - ) -> MetaOapg.properties.triton_model_repository: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["triton_commit_tag"]) -> MetaOapg.properties.triton_commit_tag: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["predict_route"]) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["healthcheck_route"]) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["predict_route"] - ) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["env"]) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_memory"] - ) -> typing.Union[MetaOapg.properties.triton_memory, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["extra_routes"]) -> typing.Union[MetaOapg.properties.extra_routes, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_model_replicas"] - ) -> typing.Union[MetaOapg.properties.triton_model_replicas, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["routes"]) -> typing.Union[MetaOapg.properties.routes, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.triton_readiness_initial_delay_seconds, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["forwarder_type"]) -> typing.Union[MetaOapg.properties.forwarder_type, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_storage"] - ) -> typing.Union[MetaOapg.properties.triton_storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["worker_command"]) -> typing.Union[MetaOapg.properties.worker_command, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "triton_commit_tag", - "triton_model_repository", - "triton_num_cpu", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "triton_memory", - "triton_model_replicas", - "triton_readiness_initial_delay_seconds", - "triton_storage", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["worker_env"]) -> typing.Union[MetaOapg.properties.worker_env, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["triton_model_replicas"]) -> typing.Union[MetaOapg.properties.triton_model_replicas, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["triton_storage"]) -> typing.Union[MetaOapg.properties.triton_storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["triton_memory"]) -> typing.Union[MetaOapg.properties.triton_memory, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"]) -> typing.Union[MetaOapg.properties.triton_readiness_initial_delay_seconds, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["repository", "tag", "command", "protocol", "flavor", "triton_model_repository", "triton_num_cpu", "triton_commit_tag", "predict_route", "healthcheck_route", "env", "readiness_initial_delay_seconds", "extra_routes", "routes", "forwarder_type", "worker_command", "worker_env", "triton_model_replicas", "triton_storage", "triton_memory", "triton_readiness_initial_delay_seconds", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - protocol: typing.Union[ - MetaOapg.properties.protocol, - str, - ], - tag: typing.Union[ - MetaOapg.properties.tag, - str, - ], - repository: typing.Union[ - MetaOapg.properties.repository, - str, - ], - triton_commit_tag: typing.Union[ - MetaOapg.properties.triton_commit_tag, - str, - ], - triton_model_repository: typing.Union[ - MetaOapg.properties.triton_model_repository, - str, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - triton_num_cpu: typing.Union[ - MetaOapg.properties.triton_num_cpu, - decimal.Decimal, - int, - float, - ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, str, ], + protocol: typing.Union[MetaOapg.properties.protocol, str, ], + tag: 
typing.Union[MetaOapg.properties.tag, str, ], + repository: typing.Union[MetaOapg.properties.repository, str, ], + triton_commit_tag: typing.Union[MetaOapg.properties.triton_commit_tag, str, ], + triton_model_repository: typing.Union[MetaOapg.properties.triton_model_repository, str, ], + command: typing.Union[MetaOapg.properties.command, list, tuple, ], + triton_num_cpu: typing.Union[MetaOapg.properties.triton_num_cpu, decimal.Decimal, int, float, ], predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - triton_memory: typing.Union[MetaOapg.properties.triton_memory, str, schemas.Unset] = schemas.unset, - triton_model_replicas: typing.Union[ - MetaOapg.properties.triton_model_replicas, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - triton_readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.triton_readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - triton_storage: typing.Union[MetaOapg.properties.triton_storage, str, schemas.Unset] = schemas.unset, + healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, + env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, + extra_routes: typing.Union[MetaOapg.properties.extra_routes, list, tuple, schemas.Unset] = schemas.unset, + routes: typing.Union[MetaOapg.properties.routes, list, tuple, schemas.Unset] = schemas.unset, + forwarder_type: typing.Union[MetaOapg.properties.forwarder_type, None, str, schemas.Unset] = schemas.unset, + worker_command: 
typing.Union[MetaOapg.properties.worker_command, list, tuple, None, schemas.Unset] = schemas.unset, + worker_env: typing.Union[MetaOapg.properties.worker_env, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + triton_model_replicas: typing.Union[MetaOapg.properties.triton_model_replicas, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + triton_storage: typing.Union[MetaOapg.properties.triton_storage, None, str, schemas.Unset] = schemas.unset, + triton_memory: typing.Union[MetaOapg.properties.triton_memory, None, str, schemas.Unset] = schemas.unset, + triton_readiness_initial_delay_seconds: typing.Union[MetaOapg.properties.triton_readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "TritonEnhancedRunnableImageFlavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'TritonEnhancedRunnableImageFlavor': return super().__new__( cls, *_args, @@ -514,14 +553,19 @@ def __new__( triton_model_repository=triton_model_repository, command=command, triton_num_cpu=triton_num_cpu, - env=env, - healthcheck_route=healthcheck_route, predict_route=predict_route, + healthcheck_route=healthcheck_route, + env=env, readiness_initial_delay_seconds=readiness_initial_delay_seconds, - triton_memory=triton_memory, + extra_routes=extra_routes, + routes=routes, + forwarder_type=forwarder_type, + worker_command=worker_command, + worker_env=worker_env, triton_model_replicas=triton_model_replicas, - triton_readiness_initial_delay_seconds=triton_readiness_initial_delay_seconds, triton_storage=triton_storage, + triton_memory=triton_memory, + 
triton_readiness_initial_delay_seconds=triton_readiness_initial_delay_seconds, _configuration=_configuration, **kwargs, ) diff --git a/launch/api_client/model/triton_enhanced_runnable_image_flavor.pyi b/launch/api_client/model/triton_enhanced_runnable_image_flavor.pyi deleted file mode 100644 index c5f86606..00000000 --- a/launch/api_client/model/triton_enhanced_runnable_image_flavor.pyi +++ /dev/null @@ -1,435 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class TritonEnhancedRunnableImageFlavor(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - For deployments that require tritonserver running in a container. 
- """ - - class MetaOapg: - required = { - "flavor", - "protocol", - "tag", - "repository", - "triton_commit_tag", - "triton_model_repository", - "command", - "triton_num_cpu", - } - - class properties: - class command(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "command": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - class flavor(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def TRITON_ENHANCED_RUNNABLE_IMAGE(cls): - return cls("triton_enhanced_runnable_image") - - class protocol(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def HTTP(cls): - return cls("http") - repository = schemas.StrSchema - tag = schemas.StrSchema - triton_commit_tag = schemas.StrSchema - triton_model_repository = schemas.StrSchema - triton_num_cpu = schemas.NumberSchema - - class env(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "env": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - healthcheck_route = schemas.StrSchema - predict_route = schemas.StrSchema - 
readiness_initial_delay_seconds = schemas.IntSchema - triton_memory = schemas.StrSchema - - class triton_model_replicas(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "triton_model_replicas": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - triton_readiness_initial_delay_seconds = schemas.IntSchema - triton_storage = schemas.StrSchema - __annotations__ = { - "command": command, - "flavor": flavor, - "protocol": protocol, - "repository": repository, - "tag": tag, - "triton_commit_tag": triton_commit_tag, - "triton_model_repository": triton_model_repository, - "triton_num_cpu": triton_num_cpu, - "env": env, - "healthcheck_route": healthcheck_route, - "predict_route": predict_route, - "readiness_initial_delay_seconds": readiness_initial_delay_seconds, - "triton_memory": triton_memory, - "triton_model_replicas": triton_model_replicas, - "triton_readiness_initial_delay_seconds": triton_readiness_initial_delay_seconds, - "triton_storage": triton_storage, - } - flavor: MetaOapg.properties.flavor - protocol: MetaOapg.properties.protocol - tag: MetaOapg.properties.tag - repository: MetaOapg.properties.repository - triton_commit_tag: MetaOapg.properties.triton_commit_tag - triton_model_repository: MetaOapg.properties.triton_model_repository - command: MetaOapg.properties.command - triton_num_cpu: MetaOapg.properties.triton_num_cpu - - @typing.overload - def __getitem__(self, name: 
typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_commit_tag"] - ) -> MetaOapg.properties.triton_commit_tag: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_model_repository"] - ) -> MetaOapg.properties.triton_model_repository: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_num_cpu"]) -> MetaOapg.properties.triton_num_cpu: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["env"]) -> MetaOapg.properties.env: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> MetaOapg.properties.healthcheck_route: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["predict_route"]) -> MetaOapg.properties.predict_route: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.readiness_initial_delay_seconds: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_memory"]) -> MetaOapg.properties.triton_memory: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_model_replicas"] - ) -> MetaOapg.properties.triton_model_replicas: ... 
- @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"] - ) -> MetaOapg.properties.triton_readiness_initial_delay_seconds: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["triton_storage"]) -> MetaOapg.properties.triton_storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "triton_commit_tag", - "triton_model_repository", - "triton_num_cpu", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "triton_memory", - "triton_model_replicas", - "triton_readiness_initial_delay_seconds", - "triton_storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["command"]) -> MetaOapg.properties.command: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["protocol"]) -> MetaOapg.properties.protocol: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["repository"]) -> MetaOapg.properties.repository: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["tag"]) -> MetaOapg.properties.tag: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_commit_tag"] - ) -> MetaOapg.properties.triton_commit_tag: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_model_repository"] - ) -> MetaOapg.properties.triton_model_repository: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_num_cpu"] - ) -> MetaOapg.properties.triton_num_cpu: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["env"] - ) -> typing.Union[MetaOapg.properties.env, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["healthcheck_route"] - ) -> typing.Union[MetaOapg.properties.healthcheck_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["predict_route"] - ) -> typing.Union[MetaOapg.properties.predict_route, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.readiness_initial_delay_seconds, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_memory"] - ) -> typing.Union[MetaOapg.properties.triton_memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_model_replicas"] - ) -> typing.Union[MetaOapg.properties.triton_model_replicas, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_readiness_initial_delay_seconds"] - ) -> typing.Union[MetaOapg.properties.triton_readiness_initial_delay_seconds, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["triton_storage"] - ) -> typing.Union[MetaOapg.properties.triton_storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "command", - "flavor", - "protocol", - "repository", - "tag", - "triton_commit_tag", - "triton_model_repository", - "triton_num_cpu", - "env", - "healthcheck_route", - "predict_route", - "readiness_initial_delay_seconds", - "triton_memory", - "triton_model_replicas", - "triton_readiness_initial_delay_seconds", - "triton_storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - protocol: typing.Union[ - MetaOapg.properties.protocol, - str, - ], - tag: typing.Union[ - MetaOapg.properties.tag, - str, - ], - repository: typing.Union[ - MetaOapg.properties.repository, - str, - ], - triton_commit_tag: typing.Union[ - MetaOapg.properties.triton_commit_tag, - str, - ], - triton_model_repository: typing.Union[ - MetaOapg.properties.triton_model_repository, - str, - ], - command: typing.Union[ - MetaOapg.properties.command, - list, - tuple, - ], - triton_num_cpu: typing.Union[ - MetaOapg.properties.triton_num_cpu, - decimal.Decimal, - int, - float, - ], - env: typing.Union[MetaOapg.properties.env, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - healthcheck_route: typing.Union[MetaOapg.properties.healthcheck_route, str, schemas.Unset] = schemas.unset, - predict_route: typing.Union[MetaOapg.properties.predict_route, str, schemas.Unset] = schemas.unset, - readiness_initial_delay_seconds: typing.Union[ - MetaOapg.properties.readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - triton_memory: typing.Union[MetaOapg.properties.triton_memory, str, schemas.Unset] = schemas.unset, - triton_model_replicas: typing.Union[ - MetaOapg.properties.triton_model_replicas, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - triton_readiness_initial_delay_seconds: typing.Union[ - 
MetaOapg.properties.triton_readiness_initial_delay_seconds, decimal.Decimal, int, schemas.Unset - ] = schemas.unset, - triton_storage: typing.Union[MetaOapg.properties.triton_storage, str, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "TritonEnhancedRunnableImageFlavor": - return super().__new__( - cls, - *_args, - flavor=flavor, - protocol=protocol, - tag=tag, - repository=repository, - triton_commit_tag=triton_commit_tag, - triton_model_repository=triton_model_repository, - command=command, - triton_num_cpu=triton_num_cpu, - env=env, - healthcheck_route=healthcheck_route, - predict_route=predict_route, - readiness_initial_delay_seconds=readiness_initial_delay_seconds, - triton_memory=triton_memory, - triton_model_replicas=triton_model_replicas, - triton_readiness_initial_delay_seconds=triton_readiness_initial_delay_seconds, - triton_storage=triton_storage, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_batch_completions_v2_request.py b/launch/api_client/model/update_batch_completions_v2_request.py new file mode 100644 index 00000000..033d152f --- /dev/null +++ b/launch/api_client/model/update_batch_completions_v2_request.py @@ -0,0 +1,112 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # 
noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class UpdateBatchCompletionsV2Request( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + required = { + "job_id", + } + + class properties: + job_id = schemas.StrSchema + + + class priority( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "job_id": job_id, + "priority": priority, + } + + job_id: MetaOapg.properties.job_id + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", "priority", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", "priority", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + job_id: typing.Union[MetaOapg.properties.job_id, str, ], + priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateBatchCompletionsV2Request': + return super().__new__( + cls, + *_args, + job_id=job_id, + priority=priority, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/update_batch_completions_v2_response.py b/launch/api_client/model/update_batch_completions_v2_response.py new file mode 100644 index 00000000..8ff3fab3 --- /dev/null +++ b/launch/api_client/model/update_batch_completions_v2_response.py @@ -0,0 +1,301 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class UpdateBatchCompletionsV2Response( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + required = { + "completed_at", + "metadata", + "expires_at", + "model_config", + "job_id", + "success", + "created_at", + "output_data_path", + "status", + } + + class properties: + job_id = schemas.StrSchema + output_data_path = schemas.StrSchema + + @staticmethod + def model_config() -> typing.Type['BatchCompletionsModelConfig']: + return BatchCompletionsModelConfig + + @staticmethod + def status() -> typing.Type['BatchCompletionsJobStatus']: + return BatchCompletionsJobStatus + created_at = schemas.StrSchema + expires_at = schemas.StrSchema + + + class completed_at( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'completed_at': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + success = schemas.BoolSchema + + + class input_data_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 
'input_data_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class priority( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "job_id": job_id, + "output_data_path": output_data_path, + "model_config": model_config, + "status": status, + "created_at": created_at, + "expires_at": expires_at, + "completed_at": completed_at, + "metadata": metadata, + "success": success, + "input_data_path": input_data_path, + "priority": priority, + } + + completed_at: MetaOapg.properties.completed_at + metadata: MetaOapg.properties.metadata + expires_at: MetaOapg.properties.expires_at + model_config: 'BatchCompletionsModelConfig' + job_id: MetaOapg.properties.job_id + success: MetaOapg.properties.success + created_at: MetaOapg.properties.created_at + output_data_path: MetaOapg.properties.output_data_path + status: 'BatchCompletionsJobStatus' + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["input_data_path"]) -> MetaOapg.properties.input_data_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["priority"]) -> MetaOapg.properties.priority: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "success", "input_data_path", "priority", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["job_id"]) -> MetaOapg.properties.job_id: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["output_data_path"]) -> MetaOapg.properties.output_data_path: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_config"]) -> 'BatchCompletionsModelConfig': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["status"]) -> 'BatchCompletionsJobStatus': ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["created_at"]) -> MetaOapg.properties.created_at: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["expires_at"]) -> MetaOapg.properties.expires_at: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["completed_at"]) -> MetaOapg.properties.completed_at: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["input_data_path"]) -> typing.Union[MetaOapg.properties.input_data_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["priority"]) -> typing.Union[MetaOapg.properties.priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["job_id", "output_data_path", "model_config", "status", "created_at", "expires_at", "completed_at", "metadata", "success", "input_data_path", "priority", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + completed_at: typing.Union[MetaOapg.properties.completed_at, None, str, ], + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, ], + expires_at: typing.Union[MetaOapg.properties.expires_at, str, ], + model_config: 'BatchCompletionsModelConfig', + job_id: typing.Union[MetaOapg.properties.job_id, str, ], + success: typing.Union[MetaOapg.properties.success, bool, ], + created_at: typing.Union[MetaOapg.properties.created_at, str, ], + output_data_path: typing.Union[MetaOapg.properties.output_data_path, str, ], + status: 'BatchCompletionsJobStatus', + input_data_path: typing.Union[MetaOapg.properties.input_data_path, None, str, schemas.Unset] = schemas.unset, + priority: typing.Union[MetaOapg.properties.priority, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateBatchCompletionsV2Response': + return super().__new__( + cls, + *_args, + completed_at=completed_at, + metadata=metadata, + expires_at=expires_at, + model_config=model_config, + job_id=job_id, + success=success, + created_at=created_at, + output_data_path=output_data_path, + status=status, + input_data_path=input_data_path, + priority=priority, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.batch_completions_job_status import ( + BatchCompletionsJobStatus, +) +from launch.api_client.model.batch_completions_model_config import ( + BatchCompletionsModelConfig, +) diff --git a/launch/api_client/model/update_batch_job_v1_request.py b/launch/api_client/model/update_batch_job_v1_request.py index 3e1de5a1..963401fd 100644 --- a/launch/api_client/model/update_batch_job_v1_request.py +++ b/launch/api_client/model/update_batch_job_v1_request.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class UpdateBatchJobV1Request(schemas.DictSchema): +class UpdateBatchJobV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "cancel", } - + class properties: cancel = schemas.BoolSchema __annotations__ = { "cancel": cancel, } - + cancel: MetaOapg.properties.cancel - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: - ... - + def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cancel: typing.Union[ - MetaOapg.properties.cancel, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + cancel: typing.Union[MetaOapg.properties.cancel, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateBatchJobV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateBatchJobV1Request': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_batch_job_v1_response.py b/launch/api_client/model/update_batch_job_v1_response.py index 00236e84..9ccfc677 100644 --- a/launch/api_client/model/update_batch_job_v1_response.py +++ b/launch/api_client/model/update_batch_job_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 
-class UpdateBatchJobV1Response(schemas.DictSchema): +class UpdateBatchJobV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "success", } - + class properties: success = schemas.BoolSchema __annotations__ = { "success": success, } - + success: MetaOapg.properties.success - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + success: typing.Union[MetaOapg.properties.success, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateBatchJobV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_batch_job_v1_response.pyi b/launch/api_client/model/update_batch_job_v1_response.pyi deleted file mode 100644 index 25a2b4b7..00000000 --- a/launch/api_client/model/update_batch_job_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateBatchJobV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateBatchJobV1Response": - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_deep_speed_model_endpoint_request.py b/launch/api_client/model/update_deep_speed_model_endpoint_request.py new file mode 100644 index 00000000..6c1902e6 --- /dev/null +++ b/launch/api_client/model/update_deep_speed_model_endpoint_request.py @@ -0,0 +1,952 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 
+ + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class UpdateDeepSpeedModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "deepspeed": "DEEPSPEED", + } + + @schemas.classproperty + def DEEPSPEED(cls): + return cls("deepspeed") + + + class inference_framework_image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'inference_framework_image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class force_bundle_recreation( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'force_bundle_recreation': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, 
decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + __annotations__ = { + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "model_name": 
model_name, + "source": source, + "inference_framework": inference_framework, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "metadata": metadata, + "force_bundle_recreation": force_bundle_recreation, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + 
force_bundle_recreation: typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, + min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateDeepSpeedModelEndpointRequest': + return super().__new__( + cls, + *_args, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + model_name=model_name, + source=source, + inference_framework=inference_framework, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + metadata=metadata, + force_bundle_recreation=force_bundle_recreation, + min_workers=min_workers, + max_workers=max_workers, + per_worker=per_worker, + labels=labels, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType 
+from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_docker_image_batch_job_v1_request.py b/launch/api_client/model/update_docker_image_batch_job_v1_request.py index c7706fab..4cbb5548 100644 --- a/launch/api_client/model/update_docker_image_batch_job_v1_request.py +++ b/launch/api_client/model/update_docker_image_batch_job_v1_request.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class UpdateDockerImageBatchJobV1Request(schemas.DictSchema): +class UpdateDockerImageBatchJobV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "cancel", } - + class properties: cancel = schemas.BoolSchema __annotations__ = { "cancel": cancel, } - + cancel: MetaOapg.properties.cancel - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: - ... - + def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... 
- - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cancel", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cancel: typing.Union[ - MetaOapg.properties.cancel, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + cancel: typing.Union[MetaOapg.properties.cancel, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateDockerImageBatchJobV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateDockerImageBatchJobV1Request': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_docker_image_batch_job_v1_request.pyi b/launch/api_client/model/update_docker_image_batch_job_v1_request.pyi deleted file mode 100644 index be8e0cbe..00000000 --- a/launch/api_client/model/update_docker_image_batch_job_v1_request.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # 
noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateDockerImageBatchJobV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "cancel", - } - - class properties: - cancel = schemas.BoolSchema - __annotations__ = { - "cancel": cancel, - } - cancel: MetaOapg.properties.cancel - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["cancel"]) -> MetaOapg.properties.cancel: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["cancel",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cancel: typing.Union[ - MetaOapg.properties.cancel, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateDockerImageBatchJobV1Request": - return super().__new__( - cls, - *_args, - cancel=cancel, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_docker_image_batch_job_v1_response.py b/launch/api_client/model/update_docker_image_batch_job_v1_response.py index 6331941e..ebe4960d 100644 --- a/launch/api_client/model/update_docker_image_batch_job_v1_response.py +++ b/launch/api_client/model/update_docker_image_batch_job_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class UpdateDockerImageBatchJobV1Response(schemas.DictSchema): +class UpdateDockerImageBatchJobV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "success", } - + class properties: success = schemas.BoolSchema __annotations__ = { "success": success, } - + success: MetaOapg.properties.success - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + success: typing.Union[MetaOapg.properties.success, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateDockerImageBatchJobV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateDockerImageBatchJobV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_docker_image_batch_job_v1_response.pyi 
b/launch/api_client/model/update_docker_image_batch_job_v1_response.pyi deleted file mode 100644 index d654616d..00000000 --- a/launch/api_client/model/update_docker_image_batch_job_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateDockerImageBatchJobV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateDockerImageBatchJobV1Response": - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_llm_model_endpoint_v1_request.py b/launch/api_client/model/update_llm_model_endpoint_v1_request.py index ce2d3e04..d4f00914 100644 --- a/launch/api_client/model/update_llm_model_endpoint_v1_request.py +++ b/launch/api_client/model/update_llm_model_endpoint_v1_request.py @@ -23,822 +23,58 @@ from launch.api_client import schemas # noqa: F401 -class UpdateLLMModelEndpointV1Request(schemas.DictSchema): +class UpdateLLMModelEndpointV1Request( + schemas.ComposedSchema, +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ - class MetaOapg: - class properties: - billing_tags = schemas.DictSchema - checkpoint_path = schemas.StrSchema - - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - class MetaOapg: - format = "uri" - max_length = 2083 - min_length = 1 - - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - gpus = schemas.IntSchema - high_priority = schemas.BoolSchema - inference_framework_image_tag = schemas.StrSchema - - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - 
*_args, - _configuration=_configuration, - **kwargs, - ) - - max_workers = schemas.IntSchema - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - metadata = schemas.DictSchema - min_workers = schemas.IntSchema - model_name = schemas.StrSchema - num_shards = schemas.IntSchema - optimize_costs = schemas.BoolSchema - per_worker = schemas.IntSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - 
_arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - - @staticmethod - def source() -> typing.Type["LLMSource"]: - return LLMSource - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - __annotations__ = { - "billing_tags": billing_tags, - "checkpoint_path": checkpoint_path, - "cpus": cpus, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "gpu_type": gpu_type, - "gpus": gpus, - "high_priority": high_priority, - "inference_framework_image_tag": 
inference_framework_image_tag, - "labels": labels, - "max_workers": max_workers, - "memory": memory, - "metadata": metadata, - "min_workers": min_workers, - "model_name": model_name, - "num_shards": num_shards, - "optimize_costs": optimize_costs, - "per_worker": per_worker, - "post_inference_hooks": post_inference_hooks, - "prewarm": prewarm, - "public_inference": public_inference, - "quantize": quantize, - "source": source, - "storage": storage, - } - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> MetaOapg.properties.inference_framework_image_tag: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... 
- - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> "LLMSource": - ... - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "inference_framework_image_tag", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_name", - "num_shards", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: - ... 
- - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["labels"] - ) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_workers"] - ) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["min_workers"] - ) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_name"] - ) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["per_worker"] - ) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: - ... 
- - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union["Quantization", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union["LLMSource", schemas.Unset]: - ... - - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... + class MetaOapg: + + @classmethod + @functools.lru_cache() + def one_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + UpdateVLLMModelEndpointRequest, + UpdateSGLangModelEndpointRequest, + UpdateDeepSpeedModelEndpointRequest, + UpdateTextGenerationInferenceModelEndpointRequest, + ] - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "inference_framework_image_tag", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_name", - "num_shards", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, 
bool, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[ - MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset - ] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - model_name: typing.Union[MetaOapg.properties.model_name, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - source: typing.Union["LLMSource", schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - 
list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateLLMModelEndpointV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateLLMModelEndpointV1Request': return super().__new__( cls, *_args, - billing_tags=billing_tags, - checkpoint_path=checkpoint_path, - cpus=cpus, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - gpu_type=gpu_type, - gpus=gpus, - high_priority=high_priority, - inference_framework_image_tag=inference_framework_image_tag, - labels=labels, - max_workers=max_workers, - memory=memory, - metadata=metadata, - min_workers=min_workers, - model_name=model_name, - num_shards=num_shards, - optimize_costs=optimize_costs, - per_worker=per_worker, - post_inference_hooks=post_inference_hooks, - prewarm=prewarm, - public_inference=public_inference, - quantize=quantize, - source=source, - storage=storage, _configuration=_configuration, **kwargs, ) - -from launch.api_client.model.callback_auth import CallbackAuth -from launch.api_client.model.gpu_type import GpuType -from launch.api_client.model.llm_source import LLMSource -from launch.api_client.model.quantization import Quantization +from launch.api_client.model.update_deep_speed_model_endpoint_request import ( + UpdateDeepSpeedModelEndpointRequest, +) +from launch.api_client.model.update_sg_lang_model_endpoint_request import ( + 
UpdateSGLangModelEndpointRequest, +) +from launch.api_client.model.update_text_generation_inference_model_endpoint_request import ( + UpdateTextGenerationInferenceModelEndpointRequest, +) +from launch.api_client.model.update_vllm_model_endpoint_request import ( + UpdateVLLMModelEndpointRequest, +) diff --git a/launch/api_client/model/update_llm_model_endpoint_v1_request.pyi b/launch/api_client/model/update_llm_model_endpoint_v1_request.pyi deleted file mode 100644 index a7e2e8ea..00000000 --- a/launch/api_client/model/update_llm_model_endpoint_v1_request.pyi +++ /dev/null @@ -1,725 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateLLMModelEndpointV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - billing_tags = schemas.DictSchema - checkpoint_path = schemas.StrSchema - - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - pass - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - gpus = schemas.IntSchema - high_priority = schemas.BoolSchema - inference_framework_image_tag = schemas.StrSchema - - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - max_workers = 
schemas.IntSchema - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - metadata = schemas.DictSchema - min_workers = schemas.IntSchema - model_name = schemas.StrSchema - num_shards = schemas.IntSchema - optimize_costs = schemas.BoolSchema - per_worker = schemas.IntSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> 
MetaOapg.items: - return super().__getitem__(i) - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - @staticmethod - def quantize() -> typing.Type["Quantization"]: - return Quantization - @staticmethod - def source() -> typing.Type["LLMSource"]: - return LLMSource - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "billing_tags": billing_tags, - "checkpoint_path": checkpoint_path, - "cpus": cpus, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "gpu_type": gpu_type, - "gpus": gpus, - "high_priority": high_priority, - "inference_framework_image_tag": inference_framework_image_tag, - "labels": labels, - "max_workers": max_workers, - "memory": memory, - 
"metadata": metadata, - "min_workers": min_workers, - "model_name": model_name, - "num_shards": num_shards, - "optimize_costs": optimize_costs, - "per_worker": per_worker, - "post_inference_hooks": post_inference_hooks, - "prewarm": prewarm, - "public_inference": public_inference, - "quantize": quantize, - "source": source, - "storage": storage, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> MetaOapg.properties.checkpoint_path: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> MetaOapg.properties.inference_framework_image_tag: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["public_inference"] - ) -> MetaOapg.properties.public_inference: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> "Quantization": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["source"]) -> "LLMSource": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "inference_framework_image_tag", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_name", - "num_shards", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["checkpoint_path"] - ) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["inference_framework_image_tag"] - ) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["labels"] - ) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_workers"] - ) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["min_workers"] - ) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_name"] - ) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["num_shards"] - ) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["per_worker"] - ) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["quantize"] - ) -> typing.Union["Quantization", schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union["LLMSource", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "checkpoint_path", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "inference_framework_image_tag", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_name", - "num_shards", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "quantize", - "source", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, str, schemas.Unset] = schemas.unset, - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - 
io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - inference_framework_image_tag: typing.Union[ - MetaOapg.properties.inference_framework_image_tag, str, schemas.Unset - ] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - model_name: typing.Union[MetaOapg.properties.model_name, str, schemas.Unset] = schemas.unset, - num_shards: typing.Union[MetaOapg.properties.num_shards, decimal.Decimal, int, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: 
typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - quantize: typing.Union["Quantization", schemas.Unset] = schemas.unset, - source: typing.Union["LLMSource", schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateLLMModelEndpointV1Request": - return super().__new__( - cls, - *_args, - billing_tags=billing_tags, - checkpoint_path=checkpoint_path, - cpus=cpus, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - gpu_type=gpu_type, - gpus=gpus, - high_priority=high_priority, - inference_framework_image_tag=inference_framework_image_tag, - labels=labels, - max_workers=max_workers, - memory=memory, - metadata=metadata, - min_workers=min_workers, - model_name=model_name, - num_shards=num_shards, - optimize_costs=optimize_costs, - per_worker=per_worker, - post_inference_hooks=post_inference_hooks, - prewarm=prewarm, - public_inference=public_inference, - quantize=quantize, - source=source, - storage=storage, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.callback_auth import CallbackAuth -from launch_client.model.gpu_type import GpuType -from launch_client.model.llm_source import LLMSource -from launch_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_llm_model_endpoint_v1_response.py 
b/launch/api_client/model/update_llm_model_endpoint_v1_response.py index 346f421f..a7ab1635 100644 --- a/launch/api_client/model/update_llm_model_endpoint_v1_response.py +++ b/launch/api_client/model/update_llm_model_endpoint_v1_response.py @@ -23,93 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class UpdateLLMModelEndpointV1Response(schemas.DictSchema): +class UpdateLLMModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "endpoint_creation_task_id", } - + class properties: endpoint_creation_task_id = schemas.StrSchema __annotations__ = { "endpoint_creation_task_id": endpoint_creation_task_id, } - + endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... 
+ @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateLLMModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateLLMModelEndpointV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_llm_model_endpoint_v1_response.pyi b/launch/api_client/model/update_llm_model_endpoint_v1_response.pyi deleted file mode 100644 index 8939abbd..00000000 --- a/launch/api_client/model/update_llm_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: 
F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateLLMModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateLLMModelEndpointV1Response": - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_model_endpoint_v1_request.py b/launch/api_client/model/update_model_endpoint_v1_request.py index 71db75f4..a62beca6 100644 --- a/launch/api_client/model/update_model_endpoint_v1_request.py +++ b/launch/api_client/model/update_model_endpoint_v1_request.py @@ -23,25 +23,108 @@ from launch.api_client import schemas # noqa: F401 -class UpdateModelEndpointV1Request(schemas.DictSchema): +class UpdateModelEndpointV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. 
""" + class MetaOapg: + class properties: - billing_tags = schemas.DictSchema - + + + class model_bundle_id( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model_bundle_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + class cpus( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): 
@@ -57,120 +140,56 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class default_callback_url(schemas.StrSchema): - class MetaOapg: - format = "uri" - max_length = 2083 - min_length = 1 - - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - class gpus(schemas.IntSchema): + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + class MetaOapg: inclusive_minimum = 0 - - high_priority = schemas.BoolSchema - - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - + + def __new__( cls, - *_args: typing.Union[ - 
dict, - frozendict.frozendict, - ], + *_args: typing.Union[None, decimal.Decimal, int, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": + ) -> 'gpus': return super().__new__( cls, *_args, _configuration=_configuration, - **kwargs, ) - - class max_workers(schemas.IntSchema): - class MetaOapg: - inclusive_minimum = 0 - + + class memory( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -186,104 +205,36 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - - metadata = schemas.DictSchema - - class min_workers(schemas.IntSchema): - class MetaOapg: - inclusive_minimum = 0 - - model_bundle_id = schemas.StrSchema - optimize_costs = schemas.BoolSchema - per_worker = schemas.IntSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - - def 
__new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + class storage( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema any_of_2 = schemas.NumberSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -299,467 +250,488 @@ def any_of(cls): cls.any_of_1, cls.any_of_2, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, 
bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + class MetaOapg: + inclusive_minimum = 0 + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class concurrent_requests_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'concurrent_requests_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> 
MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { - "billing_tags": billing_tags, + "model_bundle_id": model_bundle_id, + "metadata": metadata, + "post_inference_hooks": post_inference_hooks, "cpus": cpus, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "gpu_type": gpu_type, "gpus": gpus, - "high_priority": high_priority, - "labels": labels, - "max_workers": max_workers, "memory": memory, - "metadata": metadata, - "min_workers": min_workers, - "model_bundle_id": model_bundle_id, + "gpu_type": gpu_type, + "storage": storage, "optimize_costs": optimize_costs, + "min_workers": min_workers, + "max_workers": max_workers, "per_worker": per_worker, - "post_inference_hooks": post_inference_hooks, + "concurrent_requests_per_worker": concurrent_requests_per_worker, + "labels": labels, "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": 
default_callback_auth, "public_inference": public_inference, - "storage": storage, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: - ... - + def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": - ... - + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: - ... - + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": - ... - + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: - ... - + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: - ... - + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: - ... - + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: - ... - + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: - ... - + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: - ... - + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["model_bundle_id"]) -> MetaOapg.properties.model_bundle_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> MetaOapg.properties.concurrent_requests_per_worker: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: - ... - + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: - ... - + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: - ... - + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: - ... - + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "metadata", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "optimize_costs", "min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "labels", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["model_bundle_id"]) -> typing.Union[MetaOapg.properties.model_bundle_id, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: - ... 
- + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["labels"] - ) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_workers"] - ) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["min_workers"] - ) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> typing.Union[MetaOapg.properties.model_bundle_id, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["concurrent_requests_per_worker"]) -> typing.Union[MetaOapg.properties.concurrent_requests_per_worker, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["per_worker"] - ) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["model_bundle_id", "metadata", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "optimize_costs", "min_workers", "max_workers", "per_worker", "concurrent_requests_per_worker", "labels", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, frozendict.frozendict, 
schemas.Unset - ] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, None, str, schemas.Unset] = schemas.unset, + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, 
tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + concurrent_requests_per_worker: typing.Union[MetaOapg.properties.concurrent_requests_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - 
decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateModelEndpointV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateModelEndpointV1Request': return super().__new__( cls, *_args, - billing_tags=billing_tags, + model_bundle_id=model_bundle_id, + metadata=metadata, + post_inference_hooks=post_inference_hooks, cpus=cpus, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - gpu_type=gpu_type, gpus=gpus, - high_priority=high_priority, - labels=labels, - max_workers=max_workers, memory=memory, - metadata=metadata, - min_workers=min_workers, - model_bundle_id=model_bundle_id, + gpu_type=gpu_type, + storage=storage, optimize_costs=optimize_costs, + min_workers=min_workers, + max_workers=max_workers, per_worker=per_worker, - post_inference_hooks=post_inference_hooks, + concurrent_requests_per_worker=concurrent_requests_per_worker, + labels=labels, prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, public_inference=public_inference, - storage=storage, _configuration=_configuration, **kwargs, ) - from launch.api_client.model.callback_auth import CallbackAuth from launch.api_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/update_model_endpoint_v1_request.pyi b/launch/api_client/model/update_model_endpoint_v1_request.pyi deleted file mode 100644 index 1334acdc..00000000 --- a/launch/api_client/model/update_model_endpoint_v1_request.pyi +++ /dev/null @@ -1,662 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # 
noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateModelEndpointV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - billing_tags = schemas.DictSchema - - class cpus( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "cpus": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - @staticmethod - def default_callback_auth() -> typing.Type["CallbackAuth"]: - return CallbackAuth - - class 
default_callback_url(schemas.StrSchema): - pass - @staticmethod - def gpu_type() -> typing.Type["GpuType"]: - return GpuType - - class gpus(schemas.IntSchema): - pass - high_priority = schemas.BoolSchema - - class labels(schemas.DictSchema): - class MetaOapg: - additional_properties = schemas.StrSchema - def __getitem__( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - # dict_instance[name] accessor - return super().__getitem__(name) - def get_item_oapg( - self, - name: typing.Union[str,], - ) -> MetaOapg.additional_properties: - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - MetaOapg.additional_properties, - str, - ], - ) -> "labels": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - - class max_workers(schemas.IntSchema): - pass - - class memory( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "memory": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - metadata = schemas.DictSchema - - class min_workers(schemas.IntSchema): - pass - model_bundle_id = schemas.StrSchema - optimize_costs = schemas.BoolSchema - per_worker = schemas.IntSchema - - class post_inference_hooks(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "post_inference_hooks": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - prewarm = schemas.BoolSchema - public_inference = schemas.BoolSchema - - class storage( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - any_of_2 = schemas.NumberSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store 
_composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - cls.any_of_2, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "storage": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - __annotations__ = { - "billing_tags": billing_tags, - "cpus": cpus, - "default_callback_auth": default_callback_auth, - "default_callback_url": default_callback_url, - "gpu_type": gpu_type, - "gpus": gpus, - "high_priority": high_priority, - "labels": labels, - "max_workers": max_workers, - "memory": memory, - "metadata": metadata, - "min_workers": min_workers, - "model_bundle_id": model_bundle_id, - "optimize_costs": optimize_costs, - "per_worker": per_worker, - "post_inference_hooks": post_inference_hooks, - "prewarm": prewarm, - "public_inference": public_inference, - "storage": storage, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> "CallbackAuth": ... 
- @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> MetaOapg.properties.default_callback_url: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> "GpuType": ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> MetaOapg.properties.model_bundle_id: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> MetaOapg.properties.post_inference_hooks: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["public_inference"] - ) -> MetaOapg.properties.public_inference: ... 
- @typing.overload - def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["billing_tags"] - ) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cpus"] - ) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_auth"] - ) -> typing.Union["CallbackAuth", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["default_callback_url"] - ) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union["GpuType", schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["gpus"] - ) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["high_priority"] - ) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["labels"] - ) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... 
- @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["max_workers"] - ) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["memory"] - ) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["metadata"] - ) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["min_workers"] - ) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["model_bundle_id"] - ) -> typing.Union[MetaOapg.properties.model_bundle_id, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["optimize_costs"] - ) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["per_worker"] - ) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["post_inference_hooks"] - ) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["prewarm"] - ) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["public_inference"] - ) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["storage"] - ) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "billing_tags", - "cpus", - "default_callback_auth", - "default_callback_url", - "gpu_type", - "gpus", - "high_priority", - "labels", - "max_workers", - "memory", - "metadata", - "min_workers", - "model_bundle_id", - "optimize_costs", - "per_worker", - "post_inference_hooks", - "prewarm", - "public_inference", - "storage", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - billing_tags: typing.Union[ - MetaOapg.properties.billing_tags, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - cpus: typing.Union[ - MetaOapg.properties.cpus, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - default_callback_auth: typing.Union["CallbackAuth", schemas.Unset] = schemas.unset, - default_callback_url: typing.Union[ - MetaOapg.properties.default_callback_url, str, schemas.Unset - ] = schemas.unset, - gpu_type: typing.Union["GpuType", schemas.Unset] = schemas.unset, - gpus: typing.Union[MetaOapg.properties.gpus, decimal.Decimal, int, schemas.Unset] = schemas.unset, - high_priority: typing.Union[MetaOapg.properties.high_priority, bool, schemas.Unset] = schemas.unset, - labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, schemas.Unset] = schemas.unset, - max_workers: typing.Union[MetaOapg.properties.max_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - memory: typing.Union[ - MetaOapg.properties.memory, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - metadata: typing.Union[ - MetaOapg.properties.metadata, dict, 
frozendict.frozendict, schemas.Unset - ] = schemas.unset, - min_workers: typing.Union[MetaOapg.properties.min_workers, decimal.Decimal, int, schemas.Unset] = schemas.unset, - model_bundle_id: typing.Union[MetaOapg.properties.model_bundle_id, str, schemas.Unset] = schemas.unset, - optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, bool, schemas.Unset] = schemas.unset, - per_worker: typing.Union[MetaOapg.properties.per_worker, decimal.Decimal, int, schemas.Unset] = schemas.unset, - post_inference_hooks: typing.Union[ - MetaOapg.properties.post_inference_hooks, list, tuple, schemas.Unset - ] = schemas.unset, - prewarm: typing.Union[MetaOapg.properties.prewarm, bool, schemas.Unset] = schemas.unset, - public_inference: typing.Union[MetaOapg.properties.public_inference, bool, schemas.Unset] = schemas.unset, - storage: typing.Union[ - MetaOapg.properties.storage, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - schemas.Unset, - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateModelEndpointV1Request": - return super().__new__( - cls, - *_args, - billing_tags=billing_tags, - cpus=cpus, - default_callback_auth=default_callback_auth, - default_callback_url=default_callback_url, - gpu_type=gpu_type, - gpus=gpus, - high_priority=high_priority, - labels=labels, - max_workers=max_workers, - memory=memory, - metadata=metadata, - min_workers=min_workers, - model_bundle_id=model_bundle_id, - optimize_costs=optimize_costs, - per_worker=per_worker, - post_inference_hooks=post_inference_hooks, - prewarm=prewarm, - public_inference=public_inference, - storage=storage, - 
_configuration=_configuration, - **kwargs, - ) - -from launch_client.model.callback_auth import CallbackAuth -from launch_client.model.gpu_type import GpuType diff --git a/launch/api_client/model/update_model_endpoint_v1_response.py b/launch/api_client/model/update_model_endpoint_v1_response.py index e65d917f..0c5ff98f 100644 --- a/launch/api_client/model/update_model_endpoint_v1_response.py +++ b/launch/api_client/model/update_model_endpoint_v1_response.py @@ -23,93 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class UpdateModelEndpointV1Response(schemas.DictSchema): +class UpdateModelEndpointV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "endpoint_creation_task_id", } - + class properties: endpoint_creation_task_id = schemas.StrSchema __annotations__ = { "endpoint_creation_task_id": endpoint_creation_task_id, } - + endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def __getitem__(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["endpoint_creation_task_id"]) -> MetaOapg.properties.endpoint_creation_task_id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["endpoint_creation_task_id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + endpoint_creation_task_id: typing.Union[MetaOapg.properties.endpoint_creation_task_id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateModelEndpointV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateModelEndpointV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_model_endpoint_v1_response.pyi 
b/launch/api_client/model/update_model_endpoint_v1_response.pyi deleted file mode 100644 index cc0ef32b..00000000 --- a/launch/api_client/model/update_model_endpoint_v1_response.pyi +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateModelEndpointV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "endpoint_creation_task_id", - } - - class properties: - endpoint_creation_task_id = schemas.StrSchema - __annotations__ = { - "endpoint_creation_task_id": endpoint_creation_task_id, - } - endpoint_creation_task_id: MetaOapg.properties.endpoint_creation_task_id - - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["endpoint_creation_task_id"] - ) -> MetaOapg.properties.endpoint_creation_task_id: ... 
- @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["endpoint_creation_task_id",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - endpoint_creation_task_id: typing.Union[ - MetaOapg.properties.endpoint_creation_task_id, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateModelEndpointV1Response": - return super().__new__( - cls, - *_args, - endpoint_creation_task_id=endpoint_creation_task_id, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_sg_lang_model_endpoint_request.py b/launch/api_client/model/update_sg_lang_model_endpoint_request.py new file mode 100644 index 00000000..80834a9c --- /dev/null +++ b/launch/api_client/model/update_sg_lang_model_endpoint_request.py @@ -0,0 +1,3512 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class UpdateSGLangModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "sglang": "SGLANG", + } + + @schemas.classproperty + def SGLANG(cls): + return cls("sglang") + + + class inference_framework_image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'inference_framework_image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class force_bundle_recreation( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'force_bundle_recreation': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, 
decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class trust_remote_code( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'trust_remote_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tp_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tp_size': + return super().__new__( + cls, 
+ *_args, + _configuration=_configuration, + ) + + + class skip_tokenizer_init( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_tokenizer_init': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class kv_cache_dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'kv_cache_dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization_param_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization_param_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'quantization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class context_length( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'context_length': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class device( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'device': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class served_model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'served_model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class is_embedding( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'is_embedding': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + 
+ def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class mem_fraction_static( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'mem_fraction_static': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_running_requests( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_running_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_total_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_total_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chunked_prefill_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chunked_prefill_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_prefill_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = 
None, + ) -> 'max_prefill_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class schedule_policy( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'schedule_policy': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class schedule_conservativeness( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'schedule_conservativeness': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpu_offload_gb( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cpu_offload_gb': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prefill_only_one_req( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prefill_only_one_req': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class stream_interval( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'stream_interval': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class random_seed( + 
schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'random_seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class constrained_json_whitespace_pattern( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'constrained_json_whitespace_pattern': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class watchdog_timeout( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'watchdog_timeout': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class download_dir( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'download_dir': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class base_gpu_id( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'base_gpu_id': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class log_level( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'log_level': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class log_level_http( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'log_level_http': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class log_requests( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'log_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class show_time_cost( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'show_time_cost': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class decode_log_interval( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'decode_log_interval': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class api_key( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'api_key': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class file_storage_pth( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'file_storage_pth': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_cache_report( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_cache_report': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class data_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'data_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_balance_method( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_balance_method': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class expert_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'expert_parallel_size': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dist_init_addr( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dist_init_addr': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class nnodes( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nnodes': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class node_rank( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'node_rank': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class json_model_override_args( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'json_model_override_args': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class lora_paths( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'lora_paths': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_loras_per_batch( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_loras_per_batch': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class attention_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'attention_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class sampling_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'sampling_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class grammar_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'grammar_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_algorithm( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_algorithm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_draft_model_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_draft_model_path': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_num_steps( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_num_steps': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_num_draft_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_num_draft_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class speculative_eagle_topk( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'speculative_eagle_topk': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_double_sparsity( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_double_sparsity': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_channel_config_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_channel_config_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_heavy_channel_num( + 
schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_heavy_channel_num': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_heavy_token_num( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_heavy_token_num': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_heavy_channel_type( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_heavy_channel_type': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class ds_sparse_decode_threshold( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ds_sparse_decode_threshold': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_radix_cache( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_radix_cache': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_jump_forward( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_jump_forward': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_cuda_graph( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_cuda_graph': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_cuda_graph_padding( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_cuda_graph_padding': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_outlines_disk_cache( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_outlines_disk_cache': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_custom_all_reduce( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_custom_all_reduce': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_mla( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_mla': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class disable_overlap_schedule( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_overlap_schedule': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_mixed_chunk( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_mixed_chunk': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_dp_attention( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_dp_attention': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_ep_moe( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_ep_moe': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_torch_compile( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_torch_compile': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class torch_compile_max_bs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'torch_compile_max_bs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cuda_graph_max_bs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cuda_graph_max_bs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cuda_graph_bs( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.IntSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cuda_graph_bs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class torchao_config( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'torchao_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_nan_detection( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_nan_detection': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_p2p_check( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_p2p_check': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class triton_attention_reduce_in_fp32( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'triton_attention_reduce_in_fp32': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class triton_attention_num_kv_splits( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'triton_attention_num_kv_splits': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_continuous_decode_steps( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_continuous_decode_steps': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class delete_ckpt_after_loading( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'delete_ckpt_after_loading': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_memory_saver( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_memory_saver': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class 
allow_auto_truncate( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'allow_auto_truncate': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_custom_logit_processor( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_custom_logit_processor': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tool_call_parser( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_call_parser': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class huggingface_repo( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'huggingface_repo': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": 
chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "model_name": model_name, + "source": source, + "inference_framework": inference_framework, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "metadata": metadata, + "force_bundle_recreation": force_bundle_recreation, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "trust_remote_code": trust_remote_code, + "tp_size": tp_size, + "skip_tokenizer_init": skip_tokenizer_init, + "load_format": load_format, + "dtype": dtype, + "kv_cache_dtype": kv_cache_dtype, + "quantization_param_path": quantization_param_path, + "quantization": quantization, + "context_length": context_length, + "device": device, + "served_model_name": served_model_name, + "chat_template": chat_template, + "is_embedding": is_embedding, + "revision": revision, + "mem_fraction_static": mem_fraction_static, + "max_running_requests": max_running_requests, + "max_total_tokens": max_total_tokens, + "chunked_prefill_size": chunked_prefill_size, + "max_prefill_tokens": max_prefill_tokens, + "schedule_policy": schedule_policy, + "schedule_conservativeness": schedule_conservativeness, + "cpu_offload_gb": cpu_offload_gb, + "prefill_only_one_req": prefill_only_one_req, + "stream_interval": stream_interval, + "random_seed": random_seed, + "constrained_json_whitespace_pattern": constrained_json_whitespace_pattern, + "watchdog_timeout": watchdog_timeout, + "download_dir": download_dir, + "base_gpu_id": base_gpu_id, + "log_level": log_level, + "log_level_http": log_level_http, + "log_requests": log_requests, + "show_time_cost": show_time_cost, + "enable_metrics": enable_metrics, + "decode_log_interval": decode_log_interval, + "api_key": api_key, + "file_storage_pth": file_storage_pth, + "enable_cache_report": enable_cache_report, + "data_parallel_size": data_parallel_size, + "load_balance_method": load_balance_method, + 
"expert_parallel_size": expert_parallel_size, + "dist_init_addr": dist_init_addr, + "nnodes": nnodes, + "node_rank": node_rank, + "json_model_override_args": json_model_override_args, + "lora_paths": lora_paths, + "max_loras_per_batch": max_loras_per_batch, + "attention_backend": attention_backend, + "sampling_backend": sampling_backend, + "grammar_backend": grammar_backend, + "speculative_algorithm": speculative_algorithm, + "speculative_draft_model_path": speculative_draft_model_path, + "speculative_num_steps": speculative_num_steps, + "speculative_num_draft_tokens": speculative_num_draft_tokens, + "speculative_eagle_topk": speculative_eagle_topk, + "enable_double_sparsity": enable_double_sparsity, + "ds_channel_config_path": ds_channel_config_path, + "ds_heavy_channel_num": ds_heavy_channel_num, + "ds_heavy_token_num": ds_heavy_token_num, + "ds_heavy_channel_type": ds_heavy_channel_type, + "ds_sparse_decode_threshold": ds_sparse_decode_threshold, + "disable_radix_cache": disable_radix_cache, + "disable_jump_forward": disable_jump_forward, + "disable_cuda_graph": disable_cuda_graph, + "disable_cuda_graph_padding": disable_cuda_graph_padding, + "disable_outlines_disk_cache": disable_outlines_disk_cache, + "disable_custom_all_reduce": disable_custom_all_reduce, + "disable_mla": disable_mla, + "disable_overlap_schedule": disable_overlap_schedule, + "enable_mixed_chunk": enable_mixed_chunk, + "enable_dp_attention": enable_dp_attention, + "enable_ep_moe": enable_ep_moe, + "enable_torch_compile": enable_torch_compile, + "torch_compile_max_bs": torch_compile_max_bs, + "cuda_graph_max_bs": cuda_graph_max_bs, + "cuda_graph_bs": cuda_graph_bs, + "torchao_config": torchao_config, + "enable_nan_detection": enable_nan_detection, + "enable_p2p_check": enable_p2p_check, + "triton_attention_reduce_in_fp32": triton_attention_reduce_in_fp32, + "triton_attention_num_kv_splits": triton_attention_num_kv_splits, + "num_continuous_decode_steps": num_continuous_decode_steps, + 
"delete_ckpt_after_loading": delete_ckpt_after_loading, + "enable_memory_saver": enable_memory_saver, + "allow_auto_truncate": allow_auto_truncate, + "enable_custom_logit_processor": enable_custom_logit_processor, + "tool_call_parser": tool_call_parser, + "huggingface_repo": huggingface_repo, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tp_size"]) -> MetaOapg.properties.tp_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> MetaOapg.properties.kv_cache_dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["context_length"]) -> MetaOapg.properties.context_length: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["device"]) -> MetaOapg.properties.device: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["is_embedding"]) -> MetaOapg.properties.is_embedding: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["mem_fraction_static"]) -> MetaOapg.properties.mem_fraction_static: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_running_requests"]) -> MetaOapg.properties.max_running_requests: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_total_tokens"]) -> MetaOapg.properties.max_total_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> MetaOapg.properties.chunked_prefill_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> MetaOapg.properties.max_prefill_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["schedule_policy"]) -> MetaOapg.properties.schedule_policy: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> MetaOapg.properties.schedule_conservativeness: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> MetaOapg.properties.cpu_offload_gb: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> MetaOapg.properties.prefill_only_one_req: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["stream_interval"]) -> MetaOapg.properties.stream_interval: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["random_seed"]) -> MetaOapg.properties.random_seed: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> MetaOapg.properties.constrained_json_whitespace_pattern: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["watchdog_timeout"]) -> MetaOapg.properties.watchdog_timeout: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["download_dir"]) -> MetaOapg.properties.download_dir: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["base_gpu_id"]) -> MetaOapg.properties.base_gpu_id: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["log_level"]) -> MetaOapg.properties.log_level: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["log_level_http"]) -> MetaOapg.properties.log_level_http: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["log_requests"]) -> MetaOapg.properties.log_requests: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["show_time_cost"]) -> MetaOapg.properties.show_time_cost: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_metrics"]) -> MetaOapg.properties.enable_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["decode_log_interval"]) -> MetaOapg.properties.decode_log_interval: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["api_key"]) -> MetaOapg.properties.api_key: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["file_storage_pth"]) -> MetaOapg.properties.file_storage_pth: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_cache_report"]) -> MetaOapg.properties.enable_cache_report: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["data_parallel_size"]) -> MetaOapg.properties.data_parallel_size: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["load_balance_method"]) -> MetaOapg.properties.load_balance_method: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["expert_parallel_size"]) -> MetaOapg.properties.expert_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["dist_init_addr"]) -> MetaOapg.properties.dist_init_addr: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nnodes"]) -> MetaOapg.properties.nnodes: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["node_rank"]) -> MetaOapg.properties.node_rank: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["json_model_override_args"]) -> MetaOapg.properties.json_model_override_args: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["lora_paths"]) -> MetaOapg.properties.lora_paths: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> MetaOapg.properties.max_loras_per_batch: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["sampling_backend"]) -> MetaOapg.properties.sampling_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["grammar_backend"]) -> MetaOapg.properties.grammar_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["speculative_algorithm"]) -> MetaOapg.properties.speculative_algorithm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> MetaOapg.properties.speculative_draft_model_path: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["speculative_num_steps"]) -> MetaOapg.properties.speculative_num_steps: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> MetaOapg.properties.speculative_num_draft_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> MetaOapg.properties.speculative_eagle_topk: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> MetaOapg.properties.enable_double_sparsity: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> MetaOapg.properties.ds_channel_config_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> MetaOapg.properties.ds_heavy_channel_num: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> MetaOapg.properties.ds_heavy_token_num: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> MetaOapg.properties.ds_heavy_channel_type: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> MetaOapg.properties.ds_sparse_decode_threshold: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_radix_cache"]) -> MetaOapg.properties.disable_radix_cache: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_jump_forward"]) -> MetaOapg.properties.disable_jump_forward: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> MetaOapg.properties.disable_cuda_graph: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> MetaOapg.properties.disable_cuda_graph_padding: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> MetaOapg.properties.disable_outlines_disk_cache: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> MetaOapg.properties.disable_custom_all_reduce: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_mla"]) -> MetaOapg.properties.disable_mla: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> MetaOapg.properties.disable_overlap_schedule: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> MetaOapg.properties.enable_mixed_chunk: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_dp_attention"]) -> MetaOapg.properties.enable_dp_attention: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_ep_moe"]) -> MetaOapg.properties.enable_ep_moe: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_torch_compile"]) -> MetaOapg.properties.enable_torch_compile: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> MetaOapg.properties.torch_compile_max_bs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> MetaOapg.properties.cuda_graph_max_bs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> MetaOapg.properties.cuda_graph_bs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["torchao_config"]) -> MetaOapg.properties.torchao_config: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_nan_detection"]) -> MetaOapg.properties.enable_nan_detection: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_p2p_check"]) -> MetaOapg.properties.enable_p2p_check: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> MetaOapg.properties.triton_attention_reduce_in_fp32: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> MetaOapg.properties.triton_attention_num_kv_splits: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> MetaOapg.properties.num_continuous_decode_steps: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> MetaOapg.properties.delete_ckpt_after_loading: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_memory_saver"]) -> MetaOapg.properties.enable_memory_saver: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> MetaOapg.properties.allow_auto_truncate: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> MetaOapg.properties.enable_custom_logit_processor: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["huggingface_repo"]) -> MetaOapg.properties.huggingface_repo: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", "speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", "ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", 
"disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... 
    # Typed lookup overloads for get_item_oapg (generated by openapi-generator;
    # do not edit by hand — see .openapi-generator-ignore). Each overload maps a
    # property-name Literal to typing.Union[<that property's schema type>,
    # schemas.Unset], so callers get precise static types for dict-style access
    # while absent keys surface as schemas.Unset rather than raising.
    # NOTE(review): overloads for the earlier properties ("quantize" ...
    # "nodes_per_worker" in the impl's Literal list) precede this chunk.
    # Quoted types ('CallbackAuth', 'LLMSource') are forward references to
    # sibling generated model modules.
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["tp_size"]) -> typing.Union[MetaOapg.properties.tp_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["kv_cache_dtype"]) -> typing.Union[MetaOapg.properties.kv_cache_dtype, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["context_length"]) -> typing.Union[MetaOapg.properties.context_length, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["device"]) -> typing.Union[MetaOapg.properties.device, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["is_embedding"]) -> typing.Union[MetaOapg.properties.is_embedding, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["mem_fraction_static"]) -> typing.Union[MetaOapg.properties.mem_fraction_static, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_running_requests"]) -> typing.Union[MetaOapg.properties.max_running_requests, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_total_tokens"]) -> typing.Union[MetaOapg.properties.max_total_tokens, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["chunked_prefill_size"]) -> typing.Union[MetaOapg.properties.chunked_prefill_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_prefill_tokens"]) -> typing.Union[MetaOapg.properties.max_prefill_tokens, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["schedule_policy"]) -> typing.Union[MetaOapg.properties.schedule_policy, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["schedule_conservativeness"]) -> typing.Union[MetaOapg.properties.schedule_conservativeness, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cpu_offload_gb"]) -> typing.Union[MetaOapg.properties.cpu_offload_gb, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["prefill_only_one_req"]) -> typing.Union[MetaOapg.properties.prefill_only_one_req, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["stream_interval"]) -> typing.Union[MetaOapg.properties.stream_interval, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["random_seed"]) -> typing.Union[MetaOapg.properties.random_seed, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["constrained_json_whitespace_pattern"]) -> typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["watchdog_timeout"]) -> typing.Union[MetaOapg.properties.watchdog_timeout, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["download_dir"]) -> typing.Union[MetaOapg.properties.download_dir, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["base_gpu_id"]) -> typing.Union[MetaOapg.properties.base_gpu_id, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["log_level"]) -> typing.Union[MetaOapg.properties.log_level, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["log_level_http"]) -> typing.Union[MetaOapg.properties.log_level_http, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["log_requests"]) -> typing.Union[MetaOapg.properties.log_requests, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["show_time_cost"]) -> typing.Union[MetaOapg.properties.show_time_cost, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_metrics"]) -> typing.Union[MetaOapg.properties.enable_metrics, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["decode_log_interval"]) -> typing.Union[MetaOapg.properties.decode_log_interval, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["api_key"]) -> typing.Union[MetaOapg.properties.api_key, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["file_storage_pth"]) -> typing.Union[MetaOapg.properties.file_storage_pth, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_cache_report"]) -> typing.Union[MetaOapg.properties.enable_cache_report, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["data_parallel_size"]) -> typing.Union[MetaOapg.properties.data_parallel_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["load_balance_method"]) -> typing.Union[MetaOapg.properties.load_balance_method, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["expert_parallel_size"]) -> typing.Union[MetaOapg.properties.expert_parallel_size, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["dist_init_addr"]) -> typing.Union[MetaOapg.properties.dist_init_addr, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["nnodes"]) -> typing.Union[MetaOapg.properties.nnodes, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["node_rank"]) -> typing.Union[MetaOapg.properties.node_rank, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["json_model_override_args"]) -> typing.Union[MetaOapg.properties.json_model_override_args, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["lora_paths"]) -> typing.Union[MetaOapg.properties.lora_paths, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["max_loras_per_batch"]) -> typing.Union[MetaOapg.properties.max_loras_per_batch, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["sampling_backend"]) -> typing.Union[MetaOapg.properties.sampling_backend, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["grammar_backend"]) -> typing.Union[MetaOapg.properties.grammar_backend, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_algorithm"]) -> typing.Union[MetaOapg.properties.speculative_algorithm, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_draft_model_path"]) -> typing.Union[MetaOapg.properties.speculative_draft_model_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_steps"]) -> typing.Union[MetaOapg.properties.speculative_num_steps, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_num_draft_tokens"]) -> typing.Union[MetaOapg.properties.speculative_num_draft_tokens, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["speculative_eagle_topk"]) -> typing.Union[MetaOapg.properties.speculative_eagle_topk, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_double_sparsity"]) -> typing.Union[MetaOapg.properties.enable_double_sparsity, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_channel_config_path"]) -> typing.Union[MetaOapg.properties.ds_channel_config_path, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_num, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_token_num"]) -> typing.Union[MetaOapg.properties.ds_heavy_token_num, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_heavy_channel_type"]) -> typing.Union[MetaOapg.properties.ds_heavy_channel_type, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["ds_sparse_decode_threshold"]) -> typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_radix_cache"]) -> typing.Union[MetaOapg.properties.disable_radix_cache, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_jump_forward"]) -> typing.Union[MetaOapg.properties.disable_jump_forward, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_cuda_graph_padding"]) -> typing.Union[MetaOapg.properties.disable_cuda_graph_padding, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_outlines_disk_cache"]) -> typing.Union[MetaOapg.properties.disable_outlines_disk_cache, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_custom_all_reduce"]) -> typing.Union[MetaOapg.properties.disable_custom_all_reduce, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_mla"]) -> typing.Union[MetaOapg.properties.disable_mla, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["disable_overlap_schedule"]) -> typing.Union[MetaOapg.properties.disable_overlap_schedule, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_mixed_chunk"]) -> typing.Union[MetaOapg.properties.enable_mixed_chunk, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_dp_attention"]) -> typing.Union[MetaOapg.properties.enable_dp_attention, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_ep_moe"]) -> typing.Union[MetaOapg.properties.enable_ep_moe, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_torch_compile"]) -> typing.Union[MetaOapg.properties.enable_torch_compile, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["torch_compile_max_bs"]) -> typing.Union[MetaOapg.properties.torch_compile_max_bs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_max_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_max_bs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["cuda_graph_bs"]) -> typing.Union[MetaOapg.properties.cuda_graph_bs, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["torchao_config"]) -> typing.Union[MetaOapg.properties.torchao_config, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_nan_detection"]) -> typing.Union[MetaOapg.properties.enable_nan_detection, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_p2p_check"]) -> typing.Union[MetaOapg.properties.enable_p2p_check, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_reduce_in_fp32"]) -> typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["triton_attention_num_kv_splits"]) -> typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["num_continuous_decode_steps"]) -> typing.Union[MetaOapg.properties.num_continuous_decode_steps, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["delete_ckpt_after_loading"]) -> typing.Union[MetaOapg.properties.delete_ckpt_after_loading, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_memory_saver"]) -> typing.Union[MetaOapg.properties.enable_memory_saver, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["allow_auto_truncate"]) -> typing.Union[MetaOapg.properties.allow_auto_truncate, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["enable_custom_logit_processor"]) -> typing.Union[MetaOapg.properties.enable_custom_logit_processor, schemas.Unset]: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ...
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["huggingface_repo"]) -> typing.Union[MetaOapg.properties.huggingface_repo, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "trust_remote_code", "tp_size", "skip_tokenizer_init", "load_format", "dtype", "kv_cache_dtype", "quantization_param_path", "quantization", "context_length", "device", "served_model_name", "chat_template", "is_embedding", "revision", "mem_fraction_static", "max_running_requests", "max_total_tokens", "chunked_prefill_size", "max_prefill_tokens", "schedule_policy", "schedule_conservativeness", "cpu_offload_gb", "prefill_only_one_req", "stream_interval", "random_seed", "constrained_json_whitespace_pattern", "watchdog_timeout", "download_dir", "base_gpu_id", "log_level", "log_level_http", "log_requests", "show_time_cost", "enable_metrics", "decode_log_interval", "api_key", "file_storage_pth", "enable_cache_report", "data_parallel_size", "load_balance_method", "expert_parallel_size", "dist_init_addr", "nnodes", "node_rank", "json_model_override_args", "lora_paths", "max_loras_per_batch", "attention_backend", "sampling_backend", "grammar_backend", "speculative_algorithm", "speculative_draft_model_path", "speculative_num_steps", "speculative_num_draft_tokens", "speculative_eagle_topk", "enable_double_sparsity", 
"ds_channel_config_path", "ds_heavy_channel_num", "ds_heavy_token_num", "ds_heavy_channel_type", "ds_sparse_decode_threshold", "disable_radix_cache", "disable_jump_forward", "disable_cuda_graph", "disable_cuda_graph_padding", "disable_outlines_disk_cache", "disable_custom_all_reduce", "disable_mla", "disable_overlap_schedule", "enable_mixed_chunk", "enable_dp_attention", "enable_ep_moe", "enable_torch_compile", "torch_compile_max_bs", "cuda_graph_max_bs", "cuda_graph_bs", "torchao_config", "enable_nan_detection", "enable_p2p_check", "triton_attention_reduce_in_fp32", "triton_attention_num_kv_splits", "num_continuous_decode_steps", "delete_ckpt_after_loading", "enable_memory_saver", "allow_auto_truncate", "enable_custom_logit_processor", "tool_call_parser", "huggingface_repo", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, 
decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + force_bundle_recreation: 
typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, + min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, + tp_size: typing.Union[MetaOapg.properties.tp_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, + load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, + dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, + kv_cache_dtype: typing.Union[MetaOapg.properties.kv_cache_dtype, None, str, schemas.Unset] = schemas.unset, + quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, + quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, + context_length: typing.Union[MetaOapg.properties.context_length, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + device: typing.Union[MetaOapg.properties.device, None, str, schemas.Unset] = schemas.unset, + served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + is_embedding: typing.Union[MetaOapg.properties.is_embedding, None, bool, 
schemas.Unset] = schemas.unset, + revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, + mem_fraction_static: typing.Union[MetaOapg.properties.mem_fraction_static, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + max_running_requests: typing.Union[MetaOapg.properties.max_running_requests, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_total_tokens: typing.Union[MetaOapg.properties.max_total_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + chunked_prefill_size: typing.Union[MetaOapg.properties.chunked_prefill_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_prefill_tokens: typing.Union[MetaOapg.properties.max_prefill_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + schedule_policy: typing.Union[MetaOapg.properties.schedule_policy, None, str, schemas.Unset] = schemas.unset, + schedule_conservativeness: typing.Union[MetaOapg.properties.schedule_conservativeness, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + cpu_offload_gb: typing.Union[MetaOapg.properties.cpu_offload_gb, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + prefill_only_one_req: typing.Union[MetaOapg.properties.prefill_only_one_req, None, bool, schemas.Unset] = schemas.unset, + stream_interval: typing.Union[MetaOapg.properties.stream_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + random_seed: typing.Union[MetaOapg.properties.random_seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + constrained_json_whitespace_pattern: typing.Union[MetaOapg.properties.constrained_json_whitespace_pattern, None, str, schemas.Unset] = schemas.unset, + watchdog_timeout: typing.Union[MetaOapg.properties.watchdog_timeout, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + download_dir: typing.Union[MetaOapg.properties.download_dir, None, str, schemas.Unset] = schemas.unset, + base_gpu_id: 
typing.Union[MetaOapg.properties.base_gpu_id, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + log_level: typing.Union[MetaOapg.properties.log_level, None, str, schemas.Unset] = schemas.unset, + log_level_http: typing.Union[MetaOapg.properties.log_level_http, None, str, schemas.Unset] = schemas.unset, + log_requests: typing.Union[MetaOapg.properties.log_requests, None, bool, schemas.Unset] = schemas.unset, + show_time_cost: typing.Union[MetaOapg.properties.show_time_cost, None, bool, schemas.Unset] = schemas.unset, + enable_metrics: typing.Union[MetaOapg.properties.enable_metrics, None, bool, schemas.Unset] = schemas.unset, + decode_log_interval: typing.Union[MetaOapg.properties.decode_log_interval, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + api_key: typing.Union[MetaOapg.properties.api_key, None, str, schemas.Unset] = schemas.unset, + file_storage_pth: typing.Union[MetaOapg.properties.file_storage_pth, None, str, schemas.Unset] = schemas.unset, + enable_cache_report: typing.Union[MetaOapg.properties.enable_cache_report, None, bool, schemas.Unset] = schemas.unset, + data_parallel_size: typing.Union[MetaOapg.properties.data_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + load_balance_method: typing.Union[MetaOapg.properties.load_balance_method, None, str, schemas.Unset] = schemas.unset, + expert_parallel_size: typing.Union[MetaOapg.properties.expert_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + dist_init_addr: typing.Union[MetaOapg.properties.dist_init_addr, None, str, schemas.Unset] = schemas.unset, + nnodes: typing.Union[MetaOapg.properties.nnodes, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + node_rank: typing.Union[MetaOapg.properties.node_rank, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + json_model_override_args: typing.Union[MetaOapg.properties.json_model_override_args, None, str, schemas.Unset] = schemas.unset, + lora_paths: 
typing.Union[MetaOapg.properties.lora_paths, list, tuple, None, schemas.Unset] = schemas.unset, + max_loras_per_batch: typing.Union[MetaOapg.properties.max_loras_per_batch, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, + sampling_backend: typing.Union[MetaOapg.properties.sampling_backend, None, str, schemas.Unset] = schemas.unset, + grammar_backend: typing.Union[MetaOapg.properties.grammar_backend, None, str, schemas.Unset] = schemas.unset, + speculative_algorithm: typing.Union[MetaOapg.properties.speculative_algorithm, None, str, schemas.Unset] = schemas.unset, + speculative_draft_model_path: typing.Union[MetaOapg.properties.speculative_draft_model_path, None, str, schemas.Unset] = schemas.unset, + speculative_num_steps: typing.Union[MetaOapg.properties.speculative_num_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + speculative_num_draft_tokens: typing.Union[MetaOapg.properties.speculative_num_draft_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + speculative_eagle_topk: typing.Union[MetaOapg.properties.speculative_eagle_topk, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enable_double_sparsity: typing.Union[MetaOapg.properties.enable_double_sparsity, None, bool, schemas.Unset] = schemas.unset, + ds_channel_config_path: typing.Union[MetaOapg.properties.ds_channel_config_path, None, str, schemas.Unset] = schemas.unset, + ds_heavy_channel_num: typing.Union[MetaOapg.properties.ds_heavy_channel_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + ds_heavy_token_num: typing.Union[MetaOapg.properties.ds_heavy_token_num, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + ds_heavy_channel_type: typing.Union[MetaOapg.properties.ds_heavy_channel_type, None, str, schemas.Unset] = schemas.unset, + ds_sparse_decode_threshold: 
typing.Union[MetaOapg.properties.ds_sparse_decode_threshold, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + disable_radix_cache: typing.Union[MetaOapg.properties.disable_radix_cache, None, bool, schemas.Unset] = schemas.unset, + disable_jump_forward: typing.Union[MetaOapg.properties.disable_jump_forward, None, bool, schemas.Unset] = schemas.unset, + disable_cuda_graph: typing.Union[MetaOapg.properties.disable_cuda_graph, None, bool, schemas.Unset] = schemas.unset, + disable_cuda_graph_padding: typing.Union[MetaOapg.properties.disable_cuda_graph_padding, None, bool, schemas.Unset] = schemas.unset, + disable_outlines_disk_cache: typing.Union[MetaOapg.properties.disable_outlines_disk_cache, None, bool, schemas.Unset] = schemas.unset, + disable_custom_all_reduce: typing.Union[MetaOapg.properties.disable_custom_all_reduce, None, bool, schemas.Unset] = schemas.unset, + disable_mla: typing.Union[MetaOapg.properties.disable_mla, None, bool, schemas.Unset] = schemas.unset, + disable_overlap_schedule: typing.Union[MetaOapg.properties.disable_overlap_schedule, None, bool, schemas.Unset] = schemas.unset, + enable_mixed_chunk: typing.Union[MetaOapg.properties.enable_mixed_chunk, None, bool, schemas.Unset] = schemas.unset, + enable_dp_attention: typing.Union[MetaOapg.properties.enable_dp_attention, None, bool, schemas.Unset] = schemas.unset, + enable_ep_moe: typing.Union[MetaOapg.properties.enable_ep_moe, None, bool, schemas.Unset] = schemas.unset, + enable_torch_compile: typing.Union[MetaOapg.properties.enable_torch_compile, None, bool, schemas.Unset] = schemas.unset, + torch_compile_max_bs: typing.Union[MetaOapg.properties.torch_compile_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + cuda_graph_max_bs: typing.Union[MetaOapg.properties.cuda_graph_max_bs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + cuda_graph_bs: typing.Union[MetaOapg.properties.cuda_graph_bs, list, tuple, None, schemas.Unset] = schemas.unset, + 
torchao_config: typing.Union[MetaOapg.properties.torchao_config, None, str, schemas.Unset] = schemas.unset, + enable_nan_detection: typing.Union[MetaOapg.properties.enable_nan_detection, None, bool, schemas.Unset] = schemas.unset, + enable_p2p_check: typing.Union[MetaOapg.properties.enable_p2p_check, None, bool, schemas.Unset] = schemas.unset, + triton_attention_reduce_in_fp32: typing.Union[MetaOapg.properties.triton_attention_reduce_in_fp32, None, bool, schemas.Unset] = schemas.unset, + triton_attention_num_kv_splits: typing.Union[MetaOapg.properties.triton_attention_num_kv_splits, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + num_continuous_decode_steps: typing.Union[MetaOapg.properties.num_continuous_decode_steps, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + delete_ckpt_after_loading: typing.Union[MetaOapg.properties.delete_ckpt_after_loading, None, bool, schemas.Unset] = schemas.unset, + enable_memory_saver: typing.Union[MetaOapg.properties.enable_memory_saver, None, bool, schemas.Unset] = schemas.unset, + allow_auto_truncate: typing.Union[MetaOapg.properties.allow_auto_truncate, None, bool, schemas.Unset] = schemas.unset, + enable_custom_logit_processor: typing.Union[MetaOapg.properties.enable_custom_logit_processor, None, bool, schemas.Unset] = schemas.unset, + tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, + huggingface_repo: typing.Union[MetaOapg.properties.huggingface_repo, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateSGLangModelEndpointRequest': + return super().__new__( + cls, + *_args, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + 
memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + model_name=model_name, + source=source, + inference_framework=inference_framework, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + metadata=metadata, + force_bundle_recreation=force_bundle_recreation, + min_workers=min_workers, + max_workers=max_workers, + per_worker=per_worker, + labels=labels, + trust_remote_code=trust_remote_code, + tp_size=tp_size, + skip_tokenizer_init=skip_tokenizer_init, + load_format=load_format, + dtype=dtype, + kv_cache_dtype=kv_cache_dtype, + quantization_param_path=quantization_param_path, + quantization=quantization, + context_length=context_length, + device=device, + served_model_name=served_model_name, + chat_template=chat_template, + is_embedding=is_embedding, + revision=revision, + mem_fraction_static=mem_fraction_static, + max_running_requests=max_running_requests, + max_total_tokens=max_total_tokens, + chunked_prefill_size=chunked_prefill_size, + max_prefill_tokens=max_prefill_tokens, + schedule_policy=schedule_policy, + schedule_conservativeness=schedule_conservativeness, + cpu_offload_gb=cpu_offload_gb, + prefill_only_one_req=prefill_only_one_req, + stream_interval=stream_interval, + random_seed=random_seed, + constrained_json_whitespace_pattern=constrained_json_whitespace_pattern, + watchdog_timeout=watchdog_timeout, + download_dir=download_dir, + base_gpu_id=base_gpu_id, + log_level=log_level, + log_level_http=log_level_http, + log_requests=log_requests, + show_time_cost=show_time_cost, + enable_metrics=enable_metrics, + 
decode_log_interval=decode_log_interval, + api_key=api_key, + file_storage_pth=file_storage_pth, + enable_cache_report=enable_cache_report, + data_parallel_size=data_parallel_size, + load_balance_method=load_balance_method, + expert_parallel_size=expert_parallel_size, + dist_init_addr=dist_init_addr, + nnodes=nnodes, + node_rank=node_rank, + json_model_override_args=json_model_override_args, + lora_paths=lora_paths, + max_loras_per_batch=max_loras_per_batch, + attention_backend=attention_backend, + sampling_backend=sampling_backend, + grammar_backend=grammar_backend, + speculative_algorithm=speculative_algorithm, + speculative_draft_model_path=speculative_draft_model_path, + speculative_num_steps=speculative_num_steps, + speculative_num_draft_tokens=speculative_num_draft_tokens, + speculative_eagle_topk=speculative_eagle_topk, + enable_double_sparsity=enable_double_sparsity, + ds_channel_config_path=ds_channel_config_path, + ds_heavy_channel_num=ds_heavy_channel_num, + ds_heavy_token_num=ds_heavy_token_num, + ds_heavy_channel_type=ds_heavy_channel_type, + ds_sparse_decode_threshold=ds_sparse_decode_threshold, + disable_radix_cache=disable_radix_cache, + disable_jump_forward=disable_jump_forward, + disable_cuda_graph=disable_cuda_graph, + disable_cuda_graph_padding=disable_cuda_graph_padding, + disable_outlines_disk_cache=disable_outlines_disk_cache, + disable_custom_all_reduce=disable_custom_all_reduce, + disable_mla=disable_mla, + disable_overlap_schedule=disable_overlap_schedule, + enable_mixed_chunk=enable_mixed_chunk, + enable_dp_attention=enable_dp_attention, + enable_ep_moe=enable_ep_moe, + enable_torch_compile=enable_torch_compile, + torch_compile_max_bs=torch_compile_max_bs, + cuda_graph_max_bs=cuda_graph_max_bs, + cuda_graph_bs=cuda_graph_bs, + torchao_config=torchao_config, + enable_nan_detection=enable_nan_detection, + enable_p2p_check=enable_p2p_check, + triton_attention_reduce_in_fp32=triton_attention_reduce_in_fp32, + 
triton_attention_num_kv_splits=triton_attention_num_kv_splits, + num_continuous_decode_steps=num_continuous_decode_steps, + delete_ckpt_after_loading=delete_ckpt_after_loading, + enable_memory_saver=enable_memory_saver, + allow_auto_truncate=allow_auto_truncate, + enable_custom_logit_processor=enable_custom_logit_processor, + tool_call_parser=tool_call_parser, + huggingface_repo=huggingface_repo, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_text_generation_inference_model_endpoint_request.py b/launch/api_client/model/update_text_generation_inference_model_endpoint_request.py new file mode 100644 index 00000000..fb601999 --- /dev/null +++ b/launch/api_client/model/update_text_generation_inference_model_endpoint_request.py @@ -0,0 +1,952 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class UpdateTextGenerationInferenceModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "text_generation_inference": "TEXT_GENERATION_INFERENCE", + } + + @schemas.classproperty + def TEXT_GENERATION_INFERENCE(cls): + return cls("text_generation_inference") + + + class inference_framework_image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'inference_framework_image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + 
*_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class force_bundle_recreation( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'force_bundle_recreation': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + 
): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + __annotations__ = { + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": chat_template_override, + 
"enable_startup_metrics": enable_startup_metrics, + "model_name": model_name, + "source": source, + "inference_framework": inference_framework, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "metadata": metadata, + "force_bundle_recreation": force_bundle_recreation, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + 
force_bundle_recreation: typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, + min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateTextGenerationInferenceModelEndpointRequest': + return super().__new__( + cls, + *_args, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + model_name=model_name, + source=source, + inference_framework=inference_framework, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + metadata=metadata, + force_bundle_recreation=force_bundle_recreation, + min_workers=min_workers, + max_workers=max_workers, + per_worker=per_worker, + labels=labels, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type 
import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/update_trigger_v1_request.py b/launch/api_client/model/update_trigger_v1_request.py index 892abd76..d36707a9 100644 --- a/launch/api_client/model/update_trigger_v1_request.py +++ b/launch/api_client/model/update_trigger_v1_request.py @@ -23,101 +23,99 @@ from launch.api_client import schemas # noqa: F401 -class UpdateTriggerV1Request(schemas.DictSchema): +class UpdateTriggerV1Request( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: + class properties: - cron_schedule = schemas.StrSchema - suspend = schemas.BoolSchema + + + class cron_schedule( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'cron_schedule': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class suspend( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'suspend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) __annotations__ = { "cron_schedule": cron_schedule, "suspend": suspend, } - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: - ... - + def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suspend"]) -> MetaOapg.properties.suspend: - ... 
- + def __getitem__(self, name: typing_extensions.Literal["suspend"]) -> MetaOapg.properties.suspend: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cron_schedule", - "suspend", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["cron_schedule", "suspend", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cron_schedule"] - ) -> typing.Union[MetaOapg.properties.cron_schedule, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["cron_schedule"]) -> typing.Union[MetaOapg.properties.cron_schedule, schemas.Unset]: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["suspend"] - ) -> typing.Union[MetaOapg.properties.suspend, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["suspend"]) -> typing.Union[MetaOapg.properties.suspend, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cron_schedule", - "suspend", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["cron_schedule", "suspend", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, str, schemas.Unset] = schemas.unset, - suspend: typing.Union[MetaOapg.properties.suspend, bool, schemas.Unset] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, None, str, schemas.Unset] = schemas.unset, + suspend: typing.Union[MetaOapg.properties.suspend, None, bool, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateTriggerV1Request": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateTriggerV1Request': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_trigger_v1_request.pyi b/launch/api_client/model/update_trigger_v1_request.pyi deleted file mode 100644 index 7d31bb86..00000000 --- a/launch/api_client/model/update_trigger_v1_request.pyi +++ /dev/null @@ -1,111 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import 
typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateTriggerV1Request(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - class properties: - cron_schedule = schemas.StrSchema - suspend = schemas.BoolSchema - __annotations__ = { - "cron_schedule": cron_schedule, - "suspend": suspend, - } - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["cron_schedule"]) -> MetaOapg.properties.cron_schedule: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["suspend"]) -> MetaOapg.properties.suspend: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cron_schedule", - "suspend", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["cron_schedule"] - ) -> typing.Union[MetaOapg.properties.cron_schedule, schemas.Unset]: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["suspend"] - ) -> typing.Union[MetaOapg.properties.suspend, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "cron_schedule", - "suspend", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - cron_schedule: typing.Union[MetaOapg.properties.cron_schedule, str, schemas.Unset] = schemas.unset, - suspend: typing.Union[MetaOapg.properties.suspend, bool, schemas.Unset] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateTriggerV1Request": - return super().__new__( - cls, - *_args, - cron_schedule=cron_schedule, - suspend=suspend, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_trigger_v1_response.py b/launch/api_client/model/update_trigger_v1_response.py index 418eea96..130d49d8 100644 --- a/launch/api_client/model/update_trigger_v1_response.py +++ b/launch/api_client/model/update_trigger_v1_response.py @@ -23,89 +23,57 @@ from launch.api_client import schemas # noqa: F401 -class UpdateTriggerV1Response(schemas.DictSchema): +class UpdateTriggerV1Response( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ + class MetaOapg: required = { "success", } - + class properties: success = schemas.BoolSchema __annotations__ = { "success": success, } - + success: MetaOapg.properties.success - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... 
- - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["success", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["success", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + success: typing.Union[MetaOapg.properties.success, bool, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateTriggerV1Response": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UpdateTriggerV1Response': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/update_trigger_v1_response.pyi b/launch/api_client/model/update_trigger_v1_response.pyi deleted file mode 100644 index 
15903577..00000000 --- a/launch/api_client/model/update_trigger_v1_response.pyi +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class UpdateTriggerV1Response(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "success", - } - - class properties: - success = schemas.BoolSchema - __annotations__ = { - "success": success, - } - success: MetaOapg.properties.success - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["success"]) -> MetaOapg.properties.success: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["success",], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - success: typing.Union[ - MetaOapg.properties.success, - bool, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UpdateTriggerV1Response": - return super().__new__( - cls, - *_args, - success=success, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/update_vllm_model_endpoint_request.py b/launch/api_client/model/update_vllm_model_endpoint_request.py new file mode 100644 index 00000000..c7c113f3 --- /dev/null +++ b/launch/api_client/model/update_vllm_model_endpoint_request.py @@ -0,0 +1,2093 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class UpdateVLLMModelEndpointRequest( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + + @staticmethod + def quantize() -> typing.Type['Quantization']: + return Quantization + + + class checkpoint_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'checkpoint_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class post_inference_hooks( + schemas.ListBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneTupleMixin + ): + + + class MetaOapg: + items = schemas.StrSchema + + + def __new__( + cls, + *_args: typing.Union[list, tuple, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'post_inference_hooks': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cpus( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'cpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class gpus( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpus': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class memory( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'memory': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + @staticmethod + def gpu_type() -> typing.Type['GpuType']: + return GpuType + + + class storage( + schemas.ComposedSchema, + ): + + + class MetaOapg: + any_of_0 = schemas.StrSchema + any_of_1 = schemas.IntSchema + any_of_2 = schemas.NumberSchema + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + cls.any_of_2, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'storage': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class nodes_per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'nodes_per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class optimize_costs( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'optimize_costs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class prewarm( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'prewarm': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class high_priority( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + 
schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'high_priority': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class billing_tags( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'billing_tags': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class default_callback_url( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'default_callback_url': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def default_callback_auth() -> typing.Type['CallbackAuth']: + return CallbackAuth + + + class public_inference( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'public_inference': + return super().__new__( + cls, + *_args, + 
_configuration=_configuration, + ) + + + class chat_template_override( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_startup_metrics( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_startup_metrics': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + @staticmethod + def source() -> typing.Type['LLMSource']: + return LLMSource + + + class inference_framework( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "vllm": "VLLM", + } + + @schemas.classproperty + def VLLM(cls): + return cls("vllm") + + + class inference_framework_image_tag( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'inference_framework_image_tag': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_shards( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: 
typing.Optional[schemas.Configuration] = None, + ) -> 'num_shards': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class metadata( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'metadata': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class force_bundle_recreation( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'force_bundle_recreation': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class min_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'min_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_workers( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, 
decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_workers': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class per_worker( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'per_worker': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class labels( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.StrSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, str, ], + ) -> 'labels': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class max_gpu_memory_utilization( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_gpu_memory_utilization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class attention_backend( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 
'attention_backend': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_model_len( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_model_len': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_seqs( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_seqs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enforce_eager( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enforce_eager': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class trust_remote_code( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'trust_remote_code': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class pipeline_parallel_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'pipeline_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tensor_parallel_size( + schemas.IntBase, + schemas.NoneBase, + 
schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tensor_parallel_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_log_requests( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_log_requests': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class chat_template( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'chat_template': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tool_call_parser( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tool_call_parser': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_auto_tool_choice( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_auto_tool_choice': + return 
super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class load_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'load_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class config_format( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'config_format': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer_mode( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_mode': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class limit_mm_per_prompt( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'limit_mm_per_prompt': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_num_batched_tokens( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_num_batched_tokens': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class tokenizer( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: 
typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class seed( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'seed': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class code_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'code_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class rope_scaling( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return 
super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'rope_scaling': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class tokenizer_revision( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'tokenizer_revision': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class quantization_param_path( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'quantization_param_path': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class max_seq_len_to_capture( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'max_seq_len_to_capture': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class disable_sliding_window( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'disable_sliding_window': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class 
skip_tokenizer_init( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'skip_tokenizer_init': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class served_model_name( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'served_model_name': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class override_neuron_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'override_neuron_config': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class mm_processor_kwargs( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return 
super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, None, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'mm_processor_kwargs': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + + + class block_size( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'block_size': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class gpu_memory_utilization( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'gpu_memory_utilization': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class swap_space( + schemas.NumberBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, float, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'swap_space': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class cache_dtype( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) 
-> 'cache_dtype': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class num_gpu_blocks_override( + schemas.IntBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneDecimalMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, decimal.Decimal, int, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'num_gpu_blocks_override': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class enable_prefix_caching( + schemas.BoolBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneBoolMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, bool, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'enable_prefix_caching': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "quantize": quantize, + "checkpoint_path": checkpoint_path, + "post_inference_hooks": post_inference_hooks, + "cpus": cpus, + "gpus": gpus, + "memory": memory, + "gpu_type": gpu_type, + "storage": storage, + "nodes_per_worker": nodes_per_worker, + "optimize_costs": optimize_costs, + "prewarm": prewarm, + "high_priority": high_priority, + "billing_tags": billing_tags, + "default_callback_url": default_callback_url, + "default_callback_auth": default_callback_auth, + "public_inference": public_inference, + "chat_template_override": chat_template_override, + "enable_startup_metrics": enable_startup_metrics, + "model_name": model_name, + "source": source, + "inference_framework": inference_framework, + "inference_framework_image_tag": inference_framework_image_tag, + "num_shards": num_shards, + "metadata": metadata, + "force_bundle_recreation": force_bundle_recreation, + "min_workers": min_workers, + "max_workers": max_workers, + "per_worker": per_worker, + "labels": labels, + "max_gpu_memory_utilization": max_gpu_memory_utilization, + "attention_backend": attention_backend, + "max_model_len": max_model_len, + 
"max_num_seqs": max_num_seqs, + "enforce_eager": enforce_eager, + "trust_remote_code": trust_remote_code, + "pipeline_parallel_size": pipeline_parallel_size, + "tensor_parallel_size": tensor_parallel_size, + "quantization": quantization, + "disable_log_requests": disable_log_requests, + "chat_template": chat_template, + "tool_call_parser": tool_call_parser, + "enable_auto_tool_choice": enable_auto_tool_choice, + "load_format": load_format, + "config_format": config_format, + "tokenizer_mode": tokenizer_mode, + "limit_mm_per_prompt": limit_mm_per_prompt, + "max_num_batched_tokens": max_num_batched_tokens, + "tokenizer": tokenizer, + "dtype": dtype, + "seed": seed, + "revision": revision, + "code_revision": code_revision, + "rope_scaling": rope_scaling, + "tokenizer_revision": tokenizer_revision, + "quantization_param_path": quantization_param_path, + "max_seq_len_to_capture": max_seq_len_to_capture, + "disable_sliding_window": disable_sliding_window, + "skip_tokenizer_init": skip_tokenizer_init, + "served_model_name": served_model_name, + "override_neuron_config": override_neuron_config, + "mm_processor_kwargs": mm_processor_kwargs, + "block_size": block_size, + "gpu_memory_utilization": gpu_memory_utilization, + "swap_space": swap_space, + "cache_dtype": cache_dtype, + "num_gpu_blocks_override": num_gpu_blocks_override, + "enable_prefix_caching": enable_prefix_caching, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantize"]) -> 'Quantization': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["checkpoint_path"]) -> MetaOapg.properties.checkpoint_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["post_inference_hooks"]) -> MetaOapg.properties.post_inference_hooks: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cpus"]) -> MetaOapg.properties.cpus: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpus"]) -> MetaOapg.properties.gpus: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["memory"]) -> MetaOapg.properties.memory: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_type"]) -> 'GpuType': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["storage"]) -> MetaOapg.properties.storage: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["nodes_per_worker"]) -> MetaOapg.properties.nodes_per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["optimize_costs"]) -> MetaOapg.properties.optimize_costs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["prewarm"]) -> MetaOapg.properties.prewarm: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["high_priority"]) -> MetaOapg.properties.high_priority: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["billing_tags"]) -> MetaOapg.properties.billing_tags: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_url"]) -> MetaOapg.properties.default_callback_url: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["default_callback_auth"]) -> 'CallbackAuth': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["public_inference"]) -> MetaOapg.properties.public_inference: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template_override"]) -> MetaOapg.properties.chat_template_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> MetaOapg.properties.enable_startup_metrics: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["model_name"]) -> MetaOapg.properties.model_name: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["source"]) -> 'LLMSource': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework"]) -> MetaOapg.properties.inference_framework: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> MetaOapg.properties.inference_framework_image_tag: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_shards"]) -> MetaOapg.properties.num_shards: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["metadata"]) -> MetaOapg.properties.metadata: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> MetaOapg.properties.force_bundle_recreation: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["min_workers"]) -> MetaOapg.properties.min_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_workers"]) -> MetaOapg.properties.max_workers: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["per_worker"]) -> MetaOapg.properties.per_worker: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["labels"]) -> MetaOapg.properties.labels: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> MetaOapg.properties.max_gpu_memory_utilization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["attention_backend"]) -> MetaOapg.properties.attention_backend: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_model_len"]) -> MetaOapg.properties.max_model_len: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_seqs"]) -> MetaOapg.properties.max_num_seqs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enforce_eager"]) -> MetaOapg.properties.enforce_eager: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["trust_remote_code"]) -> MetaOapg.properties.trust_remote_code: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> MetaOapg.properties.pipeline_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> MetaOapg.properties.tensor_parallel_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization"]) -> MetaOapg.properties.quantization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_log_requests"]) -> MetaOapg.properties.disable_log_requests: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["chat_template"]) -> MetaOapg.properties.chat_template: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tool_call_parser"]) -> MetaOapg.properties.tool_call_parser: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> MetaOapg.properties.enable_auto_tool_choice: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["load_format"]) -> MetaOapg.properties.load_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["config_format"]) -> MetaOapg.properties.config_format: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_mode"]) -> MetaOapg.properties.tokenizer_mode: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> MetaOapg.properties.limit_mm_per_prompt: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> MetaOapg.properties.max_num_batched_tokens: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer"]) -> MetaOapg.properties.tokenizer: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["dtype"]) -> MetaOapg.properties.dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["seed"]) -> MetaOapg.properties.seed: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["revision"]) -> MetaOapg.properties.revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["code_revision"]) -> MetaOapg.properties.code_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["rope_scaling"]) -> MetaOapg.properties.rope_scaling: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["tokenizer_revision"]) -> MetaOapg.properties.tokenizer_revision: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["quantization_param_path"]) -> MetaOapg.properties.quantization_param_path: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> MetaOapg.properties.max_seq_len_to_capture: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["disable_sliding_window"]) -> MetaOapg.properties.disable_sliding_window: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> MetaOapg.properties.skip_tokenizer_init: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["served_model_name"]) -> MetaOapg.properties.served_model_name: ... 
+ + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["override_neuron_config"]) -> MetaOapg.properties.override_neuron_config: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> MetaOapg.properties.mm_processor_kwargs: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["block_size"]) -> MetaOapg.properties.block_size: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> MetaOapg.properties.gpu_memory_utilization: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["swap_space"]) -> MetaOapg.properties.swap_space: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["cache_dtype"]) -> MetaOapg.properties.cache_dtype: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> MetaOapg.properties.num_gpu_blocks_override: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> MetaOapg.properties.enable_prefix_caching: ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantize"]) -> typing.Union['Quantization', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["checkpoint_path"]) -> typing.Union[MetaOapg.properties.checkpoint_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["post_inference_hooks"]) -> typing.Union[MetaOapg.properties.post_inference_hooks, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cpus"]) -> typing.Union[MetaOapg.properties.cpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpus"]) -> typing.Union[MetaOapg.properties.gpus, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["memory"]) -> typing.Union[MetaOapg.properties.memory, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_type"]) -> typing.Union['GpuType', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["storage"]) -> typing.Union[MetaOapg.properties.storage, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["nodes_per_worker"]) -> typing.Union[MetaOapg.properties.nodes_per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["optimize_costs"]) -> typing.Union[MetaOapg.properties.optimize_costs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["prewarm"]) -> typing.Union[MetaOapg.properties.prewarm, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["high_priority"]) -> typing.Union[MetaOapg.properties.high_priority, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["billing_tags"]) -> typing.Union[MetaOapg.properties.billing_tags, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_url"]) -> typing.Union[MetaOapg.properties.default_callback_url, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["default_callback_auth"]) -> typing.Union['CallbackAuth', schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["public_inference"]) -> typing.Union[MetaOapg.properties.public_inference, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template_override"]) -> typing.Union[MetaOapg.properties.chat_template_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_startup_metrics"]) -> typing.Union[MetaOapg.properties.enable_startup_metrics, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["model_name"]) -> typing.Union[MetaOapg.properties.model_name, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["source"]) -> typing.Union['LLMSource', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework"]) -> typing.Union[MetaOapg.properties.inference_framework, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["inference_framework_image_tag"]) -> typing.Union[MetaOapg.properties.inference_framework_image_tag, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_shards"]) -> typing.Union[MetaOapg.properties.num_shards, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["metadata"]) -> typing.Union[MetaOapg.properties.metadata, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["force_bundle_recreation"]) -> typing.Union[MetaOapg.properties.force_bundle_recreation, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["min_workers"]) -> typing.Union[MetaOapg.properties.min_workers, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_workers"]) -> typing.Union[MetaOapg.properties.max_workers, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["per_worker"]) -> typing.Union[MetaOapg.properties.per_worker, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["labels"]) -> typing.Union[MetaOapg.properties.labels, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.max_gpu_memory_utilization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["attention_backend"]) -> typing.Union[MetaOapg.properties.attention_backend, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_model_len"]) -> typing.Union[MetaOapg.properties.max_model_len, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_seqs"]) -> typing.Union[MetaOapg.properties.max_num_seqs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enforce_eager"]) -> typing.Union[MetaOapg.properties.enforce_eager, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["trust_remote_code"]) -> typing.Union[MetaOapg.properties.trust_remote_code, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["pipeline_parallel_size"]) -> typing.Union[MetaOapg.properties.pipeline_parallel_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tensor_parallel_size"]) -> typing.Union[MetaOapg.properties.tensor_parallel_size, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization"]) -> typing.Union[MetaOapg.properties.quantization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_log_requests"]) -> typing.Union[MetaOapg.properties.disable_log_requests, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["chat_template"]) -> typing.Union[MetaOapg.properties.chat_template, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tool_call_parser"]) -> typing.Union[MetaOapg.properties.tool_call_parser, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_auto_tool_choice"]) -> typing.Union[MetaOapg.properties.enable_auto_tool_choice, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["load_format"]) -> typing.Union[MetaOapg.properties.load_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["config_format"]) -> typing.Union[MetaOapg.properties.config_format, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_mode"]) -> typing.Union[MetaOapg.properties.tokenizer_mode, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["limit_mm_per_prompt"]) -> typing.Union[MetaOapg.properties.limit_mm_per_prompt, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_num_batched_tokens"]) -> typing.Union[MetaOapg.properties.max_num_batched_tokens, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer"]) -> typing.Union[MetaOapg.properties.tokenizer, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["dtype"]) -> typing.Union[MetaOapg.properties.dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["seed"]) -> typing.Union[MetaOapg.properties.seed, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["revision"]) -> typing.Union[MetaOapg.properties.revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["code_revision"]) -> typing.Union[MetaOapg.properties.code_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["rope_scaling"]) -> typing.Union[MetaOapg.properties.rope_scaling, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["tokenizer_revision"]) -> typing.Union[MetaOapg.properties.tokenizer_revision, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["quantization_param_path"]) -> typing.Union[MetaOapg.properties.quantization_param_path, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["max_seq_len_to_capture"]) -> typing.Union[MetaOapg.properties.max_seq_len_to_capture, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["disable_sliding_window"]) -> typing.Union[MetaOapg.properties.disable_sliding_window, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["skip_tokenizer_init"]) -> typing.Union[MetaOapg.properties.skip_tokenizer_init, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["served_model_name"]) -> typing.Union[MetaOapg.properties.served_model_name, schemas.Unset]: ... 
+ + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["override_neuron_config"]) -> typing.Union[MetaOapg.properties.override_neuron_config, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["mm_processor_kwargs"]) -> typing.Union[MetaOapg.properties.mm_processor_kwargs, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["block_size"]) -> typing.Union[MetaOapg.properties.block_size, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["gpu_memory_utilization"]) -> typing.Union[MetaOapg.properties.gpu_memory_utilization, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["swap_space"]) -> typing.Union[MetaOapg.properties.swap_space, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["cache_dtype"]) -> typing.Union[MetaOapg.properties.cache_dtype, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["num_gpu_blocks_override"]) -> typing.Union[MetaOapg.properties.num_gpu_blocks_override, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["enable_prefix_caching"]) -> typing.Union[MetaOapg.properties.enable_prefix_caching, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["quantize", "checkpoint_path", "post_inference_hooks", "cpus", "gpus", "memory", "gpu_type", "storage", "nodes_per_worker", "optimize_costs", "prewarm", "high_priority", "billing_tags", "default_callback_url", "default_callback_auth", "public_inference", "chat_template_override", "enable_startup_metrics", "model_name", "source", "inference_framework", "inference_framework_image_tag", "num_shards", "metadata", "force_bundle_recreation", "min_workers", "max_workers", "per_worker", "labels", "max_gpu_memory_utilization", "attention_backend", "max_model_len", "max_num_seqs", "enforce_eager", "trust_remote_code", "pipeline_parallel_size", "tensor_parallel_size", "quantization", "disable_log_requests", "chat_template", "tool_call_parser", "enable_auto_tool_choice", "load_format", "config_format", "tokenizer_mode", "limit_mm_per_prompt", "max_num_batched_tokens", "tokenizer", "dtype", "seed", "revision", "code_revision", "rope_scaling", "tokenizer_revision", "quantization_param_path", "max_seq_len_to_capture", "disable_sliding_window", "skip_tokenizer_init", "served_model_name", "override_neuron_config", "mm_processor_kwargs", "block_size", "gpu_memory_utilization", "swap_space", "cache_dtype", "num_gpu_blocks_override", "enable_prefix_caching", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + quantize: typing.Union['Quantization', schemas.Unset] = schemas.unset, + checkpoint_path: typing.Union[MetaOapg.properties.checkpoint_path, None, str, schemas.Unset] = schemas.unset, + post_inference_hooks: typing.Union[MetaOapg.properties.post_inference_hooks, list, tuple, None, schemas.Unset] = schemas.unset, + cpus: typing.Union[MetaOapg.properties.cpus, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = 
schemas.unset, + gpus: typing.Union[MetaOapg.properties.gpus, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + memory: typing.Union[MetaOapg.properties.memory, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + gpu_type: typing.Union['GpuType', schemas.Unset] = schemas.unset, + storage: typing.Union[MetaOapg.properties.storage, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, schemas.Unset] = schemas.unset, + nodes_per_worker: typing.Union[MetaOapg.properties.nodes_per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + optimize_costs: typing.Union[MetaOapg.properties.optimize_costs, None, bool, schemas.Unset] = schemas.unset, + prewarm: typing.Union[MetaOapg.properties.prewarm, None, bool, schemas.Unset] = schemas.unset, + high_priority: typing.Union[MetaOapg.properties.high_priority, None, bool, schemas.Unset] = schemas.unset, + billing_tags: typing.Union[MetaOapg.properties.billing_tags, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + default_callback_url: typing.Union[MetaOapg.properties.default_callback_url, None, str, schemas.Unset] = schemas.unset, + default_callback_auth: typing.Union['CallbackAuth', schemas.Unset] = schemas.unset, + public_inference: typing.Union[MetaOapg.properties.public_inference, None, bool, schemas.Unset] = schemas.unset, + chat_template_override: typing.Union[MetaOapg.properties.chat_template_override, None, str, schemas.Unset] = schemas.unset, + enable_startup_metrics: typing.Union[MetaOapg.properties.enable_startup_metrics, None, bool, schemas.Unset] = schemas.unset, + model_name: typing.Union[MetaOapg.properties.model_name, None, str, schemas.Unset] = schemas.unset, + source: typing.Union['LLMSource', schemas.Unset] = schemas.unset, + 
inference_framework: typing.Union[MetaOapg.properties.inference_framework, str, schemas.Unset] = schemas.unset, + inference_framework_image_tag: typing.Union[MetaOapg.properties.inference_framework_image_tag, None, str, schemas.Unset] = schemas.unset, + num_shards: typing.Union[MetaOapg.properties.num_shards, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + metadata: typing.Union[MetaOapg.properties.metadata, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + force_bundle_recreation: typing.Union[MetaOapg.properties.force_bundle_recreation, None, bool, schemas.Unset] = schemas.unset, + min_workers: typing.Union[MetaOapg.properties.min_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_workers: typing.Union[MetaOapg.properties.max_workers, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + per_worker: typing.Union[MetaOapg.properties.per_worker, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + labels: typing.Union[MetaOapg.properties.labels, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + max_gpu_memory_utilization: typing.Union[MetaOapg.properties.max_gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + attention_backend: typing.Union[MetaOapg.properties.attention_backend, None, str, schemas.Unset] = schemas.unset, + max_model_len: typing.Union[MetaOapg.properties.max_model_len, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + max_num_seqs: typing.Union[MetaOapg.properties.max_num_seqs, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enforce_eager: typing.Union[MetaOapg.properties.enforce_eager, None, bool, schemas.Unset] = schemas.unset, + trust_remote_code: typing.Union[MetaOapg.properties.trust_remote_code, None, bool, schemas.Unset] = schemas.unset, + pipeline_parallel_size: typing.Union[MetaOapg.properties.pipeline_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + 
tensor_parallel_size: typing.Union[MetaOapg.properties.tensor_parallel_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + quantization: typing.Union[MetaOapg.properties.quantization, None, str, schemas.Unset] = schemas.unset, + disable_log_requests: typing.Union[MetaOapg.properties.disable_log_requests, None, bool, schemas.Unset] = schemas.unset, + chat_template: typing.Union[MetaOapg.properties.chat_template, None, str, schemas.Unset] = schemas.unset, + tool_call_parser: typing.Union[MetaOapg.properties.tool_call_parser, None, str, schemas.Unset] = schemas.unset, + enable_auto_tool_choice: typing.Union[MetaOapg.properties.enable_auto_tool_choice, None, bool, schemas.Unset] = schemas.unset, + load_format: typing.Union[MetaOapg.properties.load_format, None, str, schemas.Unset] = schemas.unset, + config_format: typing.Union[MetaOapg.properties.config_format, None, str, schemas.Unset] = schemas.unset, + tokenizer_mode: typing.Union[MetaOapg.properties.tokenizer_mode, None, str, schemas.Unset] = schemas.unset, + limit_mm_per_prompt: typing.Union[MetaOapg.properties.limit_mm_per_prompt, None, str, schemas.Unset] = schemas.unset, + max_num_batched_tokens: typing.Union[MetaOapg.properties.max_num_batched_tokens, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + tokenizer: typing.Union[MetaOapg.properties.tokenizer, None, str, schemas.Unset] = schemas.unset, + dtype: typing.Union[MetaOapg.properties.dtype, None, str, schemas.Unset] = schemas.unset, + seed: typing.Union[MetaOapg.properties.seed, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + revision: typing.Union[MetaOapg.properties.revision, None, str, schemas.Unset] = schemas.unset, + code_revision: typing.Union[MetaOapg.properties.code_revision, None, str, schemas.Unset] = schemas.unset, + rope_scaling: typing.Union[MetaOapg.properties.rope_scaling, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + tokenizer_revision: 
typing.Union[MetaOapg.properties.tokenizer_revision, None, str, schemas.Unset] = schemas.unset, + quantization_param_path: typing.Union[MetaOapg.properties.quantization_param_path, None, str, schemas.Unset] = schemas.unset, + max_seq_len_to_capture: typing.Union[MetaOapg.properties.max_seq_len_to_capture, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + disable_sliding_window: typing.Union[MetaOapg.properties.disable_sliding_window, None, bool, schemas.Unset] = schemas.unset, + skip_tokenizer_init: typing.Union[MetaOapg.properties.skip_tokenizer_init, None, bool, schemas.Unset] = schemas.unset, + served_model_name: typing.Union[MetaOapg.properties.served_model_name, None, str, schemas.Unset] = schemas.unset, + override_neuron_config: typing.Union[MetaOapg.properties.override_neuron_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + mm_processor_kwargs: typing.Union[MetaOapg.properties.mm_processor_kwargs, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, + block_size: typing.Union[MetaOapg.properties.block_size, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + gpu_memory_utilization: typing.Union[MetaOapg.properties.gpu_memory_utilization, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + swap_space: typing.Union[MetaOapg.properties.swap_space, None, decimal.Decimal, int, float, schemas.Unset] = schemas.unset, + cache_dtype: typing.Union[MetaOapg.properties.cache_dtype, None, str, schemas.Unset] = schemas.unset, + num_gpu_blocks_override: typing.Union[MetaOapg.properties.num_gpu_blocks_override, None, decimal.Decimal, int, schemas.Unset] = schemas.unset, + enable_prefix_caching: typing.Union[MetaOapg.properties.enable_prefix_caching, None, bool, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, 
None, list, tuple, bytes], + ) -> 'UpdateVLLMModelEndpointRequest': + return super().__new__( + cls, + *_args, + quantize=quantize, + checkpoint_path=checkpoint_path, + post_inference_hooks=post_inference_hooks, + cpus=cpus, + gpus=gpus, + memory=memory, + gpu_type=gpu_type, + storage=storage, + nodes_per_worker=nodes_per_worker, + optimize_costs=optimize_costs, + prewarm=prewarm, + high_priority=high_priority, + billing_tags=billing_tags, + default_callback_url=default_callback_url, + default_callback_auth=default_callback_auth, + public_inference=public_inference, + chat_template_override=chat_template_override, + enable_startup_metrics=enable_startup_metrics, + model_name=model_name, + source=source, + inference_framework=inference_framework, + inference_framework_image_tag=inference_framework_image_tag, + num_shards=num_shards, + metadata=metadata, + force_bundle_recreation=force_bundle_recreation, + min_workers=min_workers, + max_workers=max_workers, + per_worker=per_worker, + labels=labels, + max_gpu_memory_utilization=max_gpu_memory_utilization, + attention_backend=attention_backend, + max_model_len=max_model_len, + max_num_seqs=max_num_seqs, + enforce_eager=enforce_eager, + trust_remote_code=trust_remote_code, + pipeline_parallel_size=pipeline_parallel_size, + tensor_parallel_size=tensor_parallel_size, + quantization=quantization, + disable_log_requests=disable_log_requests, + chat_template=chat_template, + tool_call_parser=tool_call_parser, + enable_auto_tool_choice=enable_auto_tool_choice, + load_format=load_format, + config_format=config_format, + tokenizer_mode=tokenizer_mode, + limit_mm_per_prompt=limit_mm_per_prompt, + max_num_batched_tokens=max_num_batched_tokens, + tokenizer=tokenizer, + dtype=dtype, + seed=seed, + revision=revision, + code_revision=code_revision, + rope_scaling=rope_scaling, + tokenizer_revision=tokenizer_revision, + quantization_param_path=quantization_param_path, + max_seq_len_to_capture=max_seq_len_to_capture, + 
disable_sliding_window=disable_sliding_window, + skip_tokenizer_init=skip_tokenizer_init, + served_model_name=served_model_name, + override_neuron_config=override_neuron_config, + mm_processor_kwargs=mm_processor_kwargs, + block_size=block_size, + gpu_memory_utilization=gpu_memory_utilization, + swap_space=swap_space, + cache_dtype=cache_dtype, + num_gpu_blocks_override=num_gpu_blocks_override, + enable_prefix_caching=enable_prefix_caching, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.callback_auth import CallbackAuth +from launch.api_client.model.gpu_type import GpuType +from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.quantization import Quantization diff --git a/launch/api_client/model/upload_file_response.py b/launch/api_client/model/upload_file_response.py index ee963a59..15cfb00e 100644 --- a/launch/api_client/model/upload_file_response.py +++ b/launch/api_client/model/upload_file_response.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class UploadFileResponse(schemas.DictSchema): +class UploadFileResponse( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,82 +34,48 @@ class UploadFileResponse(schemas.DictSchema): Response object for uploading a file. """ + class MetaOapg: required = { "id", } - + class properties: id = schemas.StrSchema __annotations__ = { "id": id, } - + id: MetaOapg.properties.id - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def __getitem__(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["id", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["id"]) -> MetaOapg.properties.id: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal["id",], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... + + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["id", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - id: typing.Union[ - MetaOapg.properties.id, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + id: typing.Union[MetaOapg.properties.id, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "UploadFileResponse": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'UploadFileResponse': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/url_citation.py b/launch/api_client/model/url_citation.py new file mode 100644 index 00000000..47ddef88 --- /dev/null +++ b/launch/api_client/model/url_citation.py @@ -0,0 +1,119 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI 
document: 1.0.0
    Generated by: https://openapi-generator.tech
"""

import decimal  # noqa: F401
import functools  # noqa: F401
import io  # noqa: F401
import re  # noqa: F401
import typing  # noqa: F401
import uuid  # noqa: F401
from datetime import date, datetime  # noqa: F401

import frozendict  # noqa: F401
import typing_extensions  # noqa: F401

from launch.api_client import schemas  # noqa: F401


class UrlCitation(
    schemas.DictSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """


    # Schema metadata: all four properties are required. start_index/end_index
    # are integers (presumably character offsets of the cited span — TODO
    # confirm against the OpenAPI spec); title and url are strings.
    class MetaOapg:
        required = {
            "start_index",
            "end_index",
            "title",
            "url",
        }

        class properties:
            end_index = schemas.IntSchema
            start_index = schemas.IntSchema
            url = schemas.StrSchema
            title = schemas.StrSchema
            __annotations__ = {
                "end_index": end_index,
                "start_index": start_index,
                "url": url,
                "title": title,
            }

    start_index: MetaOapg.properties.start_index
    end_index: MetaOapg.properties.end_index
    title: MetaOapg.properties.title
    url: MetaOapg.properties.url

    # Typed __getitem__ overloads so dict-style access is narrowed to the
    # per-property schema type; the final str overload covers unknown keys.
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["end_index"]) -> MetaOapg.properties.end_index: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["start_index"]) -> MetaOapg.properties.start_index: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["title"]) -> MetaOapg.properties.title: ...

    @typing.overload
    def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...

    def __getitem__(self, name: typing.Union[typing_extensions.Literal["end_index", "start_index", "url", "title", ], str]):
        # dict_instance[name] accessor
        return super().__getitem__(name)


    # Like __getitem__, but the str overload is typed to also allow
    # schemas.Unset for keys that are absent (see return annotation).
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["end_index"]) -> MetaOapg.properties.end_index: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["start_index"]) -> MetaOapg.properties.start_index: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["url"]) -> MetaOapg.properties.url: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["title"]) -> MetaOapg.properties.title: ...

    @typing.overload
    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...

    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["end_index", "start_index", "url", "title", ], str]):
        return super().get_item_oapg(name)


    def __new__(
        cls,
        *_args: typing.Union[dict, frozendict.frozendict, ],
        start_index: typing.Union[MetaOapg.properties.start_index, decimal.Decimal, int, ],
        end_index: typing.Union[MetaOapg.properties.end_index, decimal.Decimal, int, ],
        title: typing.Union[MetaOapg.properties.title, str, ],
        url: typing.Union[MetaOapg.properties.url, str, ],
        _configuration: typing.Optional[schemas.Configuration] = None,
        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
    ) -> 'UrlCitation':
        # Validating constructor: DictSchema.__new__ enforces the required
        # keys and property schemas declared in MetaOapg.
        return super().__new__(
            cls,
            *_args,
            start_index=start_index,
            end_index=end_index,
            title=title,
            url=url,
            _configuration=_configuration,
            **kwargs,
        )
diff --git a/launch/api_client/model/user_location.py b/launch/api_client/model/user_location.py
new file mode 100644
index 00000000..7aa18e60
--- /dev/null
+++ b/launch/api_client/model/user_location.py
@@ -0,0 +1,115 @@
# 
coding: utf-8

"""
    launch

    No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501

    The version of the OpenAPI document: 1.0.0
    Generated by: https://openapi-generator.tech
"""

import decimal  # noqa: F401
import functools  # noqa: F401
import io  # noqa: F401
import re  # noqa: F401
import typing  # noqa: F401
import uuid  # noqa: F401
from datetime import date, datetime  # noqa: F401

import frozendict  # noqa: F401
import typing_extensions  # noqa: F401

from launch.api_client import schemas  # noqa: F401


class UserLocation(
    schemas.DictSchema
):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """


    class MetaOapg:
        required = {
            "approximate",
            "type",
        }

        class properties:


            # String enum with a single allowed value, "approximate"
            # (discriminator-style tag for this location shape).
            class type(
                schemas.EnumBase,
                schemas.StrSchema
            ):


                class MetaOapg:
                    enum_value_to_name = {
                        "approximate": "APPROXIMATE",
                    }

                @schemas.classproperty
                def APPROXIMATE(cls):
                    return cls("approximate")

            # Lazy reference to WebSearchLocation: the class is imported at
            # module bottom (circular-import avoidance), so the generator
            # wraps the lookup in a staticmethod evaluated on use.
            @staticmethod
            def approximate() -> typing.Type['WebSearchLocation']:
                return WebSearchLocation
            __annotations__ = {
                "type": type,
                "approximate": approximate,
            }

    approximate: 'WebSearchLocation'
    type: MetaOapg.properties.type

    # Typed __getitem__ overloads narrow dict-style access per key; the
    # final str overload covers unknown keys.
    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ...

    @typing.overload
    def __getitem__(self, name: typing_extensions.Literal["approximate"]) -> 'WebSearchLocation': ...

    @typing.overload
    def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ...

    def __getitem__(self, name: typing.Union[typing_extensions.Literal["type", "approximate", ], str]):
        # dict_instance[name] accessor
        return super().__getitem__(name)


    # Like __getitem__, but the str overload is typed to also allow
    # schemas.Unset for keys that are absent.
    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ...

    @typing.overload
    def get_item_oapg(self, name: typing_extensions.Literal["approximate"]) -> 'WebSearchLocation': ...

    @typing.overload
    def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ...

    def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["type", "approximate", ], str]):
        return super().get_item_oapg(name)


    def __new__(
        cls,
        *_args: typing.Union[dict, frozendict.frozendict, ],
        approximate: 'WebSearchLocation',
        type: typing.Union[MetaOapg.properties.type, str, ],
        _configuration: typing.Optional[schemas.Configuration] = None,
        **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes],
    ) -> 'UserLocation':
        # Validating constructor: DictSchema.__new__ enforces the required
        # keys ("approximate", "type") declared in MetaOapg.
        return super().__new__(
            cls,
            *_args,
            approximate=approximate,
            type=type,
            _configuration=_configuration,
            **kwargs,
        )

from launch.api_client.model.web_search_location import WebSearchLocation
diff --git a/launch/api_client/model/validation_error.py b/launch/api_client/model/validation_error.py
index 8cc3cf6a..6b4dab58 100644
--- a/launch/api_client/model/validation_error.py
+++ b/launch/api_client/model/validation_error.py
@@ -23,30 +23,43 @@
 from launch.api_client import schemas  # noqa: F401
 
 
-class ValidationError(schemas.DictSchema):
+class ValidationError(
+    schemas.DictSchema
+):
     """NOTE: This class is auto generated by OpenAPI Generator.
     Ref: https://openapi-generator.tech
 
     Do not edit the class manually.
""" + class MetaOapg: required = { "msg", "loc", "type", } - + class properties: - class loc(schemas.ListSchema): + + + class loc( + schemas.ListSchema + ): + + class MetaOapg: + + class items( schemas.ComposedSchema, ): + + class MetaOapg: any_of_0 = schemas.StrSchema any_of_1 = schemas.IntSchema - + @classmethod @functools.lru_cache() def any_of(cls): @@ -61,109 +74,34 @@ def any_of(cls): cls.any_of_0, cls.any_of_1, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "items": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'items': return super().__new__( cls, *_args, _configuration=_configuration, **kwargs, ) - + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ] - ], - ], + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, dict, 
frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ]], typing.List[typing.Union[MetaOapg.items, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ]]], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "loc": + ) -> 'loc': return super().__new__( cls, _arg, _configuration=_configuration, ) - + def __getitem__(self, i: int) -> MetaOapg.items: return super().__getitem__(i) - msg = schemas.StrSchema type = schemas.StrSchema __annotations__ = { @@ -171,107 +109,53 @@ def __getitem__(self, i: int) -> MetaOapg.items: "msg": msg, "type": type, } - + msg: MetaOapg.properties.msg loc: MetaOapg.properties.loc type: MetaOapg.properties.type - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: - ... - + def __getitem__(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: - ... - + def __getitem__(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: - ... - + def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "loc", - "msg", - "type", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
+ + def __getitem__(self, name: typing.Union[typing_extensions.Literal["loc", "msg", "type", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "loc", - "msg", - "type", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["loc", "msg", "type", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - msg: typing.Union[ - MetaOapg.properties.msg, - str, - ], - loc: typing.Union[ - MetaOapg.properties.loc, - list, - tuple, - ], - type: typing.Union[ - MetaOapg.properties.type, - str, - ], + *_args: typing.Union[dict, frozendict.frozendict, ], + msg: typing.Union[MetaOapg.properties.msg, str, ], + loc: typing.Union[MetaOapg.properties.loc, list, tuple, ], + type: typing.Union[MetaOapg.properties.type, str, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ValidationError": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ValidationError': return super().__new__( cls, *_args, diff --git a/launch/api_client/model/validation_error.pyi b/launch/api_client/model/validation_error.pyi deleted file mode 100644 index 865e67c4..00000000 --- a/launch/api_client/model/validation_error.pyi +++ /dev/null @@ -1,258 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class 
ValidationError(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - class MetaOapg: - required = { - "msg", - "loc", - "type", - } - - class properties: - class loc(schemas.ListSchema): - class MetaOapg: - class items( - schemas.ComposedSchema, - ): - class MetaOapg: - any_of_0 = schemas.StrSchema - any_of_1 = schemas.IntSchema - - @classmethod - @functools.lru_cache() - def any_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - cls.any_of_0, - cls.any_of_1, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "items": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - 
bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "loc": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - msg = schemas.StrSchema - type = schemas.StrSchema - __annotations__ = { - "loc": loc, - "msg": msg, - "type": type, - } - msg: MetaOapg.properties.msg - loc: MetaOapg.properties.loc - type: MetaOapg.properties.type - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "loc", - "msg", - "type", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["loc"]) -> MetaOapg.properties.loc: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["msg"]) -> MetaOapg.properties.msg: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["type"]) -> MetaOapg.properties.type: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "loc", - "msg", - "type", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - msg: typing.Union[ - MetaOapg.properties.msg, - str, - ], - loc: typing.Union[ - MetaOapg.properties.loc, - list, - tuple, - ], - type: typing.Union[ - MetaOapg.properties.type, - str, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ValidationError": - return super().__new__( - cls, - *_args, - msg=msg, - loc=loc, - type=type, - _configuration=_configuration, - **kwargs, - ) diff --git a/launch/api_client/model/voice_ids_shared.py b/launch/api_client/model/voice_ids_shared.py new file mode 100644 index 00000000..2b44a00e --- /dev/null +++ b/launch/api_client/model/voice_ids_shared.py @@ -0,0 +1,132 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class VoiceIdsShared( + schemas.ComposedSchema, +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + any_of_0 = schemas.StrSchema + + + class any_of_1( + schemas.EnumBase, + schemas.StrSchema + ): + + + class MetaOapg: + enum_value_to_name = { + "alloy": "ALLOY", + "ash": "ASH", + "ballad": "BALLAD", + "coral": "CORAL", + "echo": "ECHO", + "fable": "FABLE", + "onyx": "ONYX", + "nova": "NOVA", + "sage": "SAGE", + "shimmer": "SHIMMER", + "verse": "VERSE", + } + + @schemas.classproperty + def ALLOY(cls): + return cls("alloy") + + @schemas.classproperty + def ASH(cls): + return cls("ash") + + @schemas.classproperty + def BALLAD(cls): + return cls("ballad") + + @schemas.classproperty + def CORAL(cls): + return cls("coral") + + @schemas.classproperty + def ECHO(cls): + return cls("echo") + + @schemas.classproperty + def FABLE(cls): + return cls("fable") + + @schemas.classproperty + def ONYX(cls): + return cls("onyx") + + @schemas.classproperty + def NOVA(cls): + return cls("nova") + + @schemas.classproperty + def SAGE(cls): + return cls("sage") + + @schemas.classproperty + def SHIMMER(cls): + return cls("shimmer") + + @schemas.classproperty + def VERSE(cls): + return cls("verse") + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + cls.any_of_0, + cls.any_of_1, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'VoiceIdsShared': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/model_endpoint_order_by.pyi b/launch/api_client/model/web_search_context_size.py similarity index 57% rename from launch/api_client/model/model_endpoint_order_by.pyi rename to launch/api_client/model/web_search_context_size.py index 28193762..31c3e655 100644 --- a/launch/api_client/model/model_endpoint_order_by.pyi +++ b/launch/api_client/model/web_search_context_size.py @@ -19,23 +19,40 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 -class ModelEndpointOrderBy(schemas.EnumBase, schemas.StrSchema): +from launch.api_client import schemas # noqa: F401 + + +class WebSearchContextSize( + schemas.EnumBase, + schemas.StrSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. - The canonical list of possible orderings of Model Bundles. + High level guidance for the amount of context window space to use for the +search. One of `low`, `medium`, or `high`. `medium` is the default. 
+ """ + + class MetaOapg: + enum_value_to_name = { + "low": "LOW", + "medium": "MEDIUM", + "high": "HIGH", + } + @schemas.classproperty - def NEWEST(cls): - return cls("newest") + def LOW(cls): + return cls("low") + @schemas.classproperty - def OLDEST(cls): - return cls("oldest") + def MEDIUM(cls): + return cls("medium") + @schemas.classproperty - def ALPHABETICAL(cls): - return cls("alphabetical") + def HIGH(cls): + return cls("high") diff --git a/launch/api_client/model/web_search_location.py b/launch/api_client/model/web_search_location.py new file mode 100644 index 00000000..20fbb38c --- /dev/null +++ b/launch/api_client/model/web_search_location.py @@ -0,0 +1,184 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class WebSearchLocation( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ """ + + + class MetaOapg: + + class properties: + + + class country( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'country': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class region( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'region': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class city( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'city': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + + + class timezone( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin + ): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'timezone': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) + __annotations__ = { + "country": country, + "region": region, + "city": city, + "timezone": timezone, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["country"]) -> MetaOapg.properties.country: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["region"]) -> MetaOapg.properties.region: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["city"]) -> MetaOapg.properties.city: ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["timezone"]) -> MetaOapg.properties.timezone: ... 
+ + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["country", "region", "city", "timezone", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["country"]) -> typing.Union[MetaOapg.properties.country, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["region"]) -> typing.Union[MetaOapg.properties.region, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["city"]) -> typing.Union[MetaOapg.properties.city, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["timezone"]) -> typing.Union[MetaOapg.properties.timezone, schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["country", "region", "city", "timezone", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + country: typing.Union[MetaOapg.properties.country, None, str, schemas.Unset] = schemas.unset, + region: typing.Union[MetaOapg.properties.region, None, str, schemas.Unset] = schemas.unset, + city: typing.Union[MetaOapg.properties.city, None, str, schemas.Unset] = schemas.unset, + timezone: typing.Union[MetaOapg.properties.timezone, None, str, schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'WebSearchLocation': + return super().__new__( + cls, + *_args, + country=country, + region=region, + city=city, + timezone=timezone, + _configuration=_configuration, + **kwargs, + ) diff --git a/launch/api_client/model/web_search_options.py b/launch/api_client/model/web_search_options.py new file mode 100644 index 00000000..f14e71b3 --- /dev/null +++ b/launch/api_client/model/web_search_options.py @@ -0,0 +1,99 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import decimal # noqa: F401 +import functools # noqa: F401 +import io # noqa: F401 +import re # noqa: F401 +import typing # noqa: F401 +import uuid # noqa: F401 +from datetime import date, datetime # noqa: F401 + +import frozendict # noqa: F401 +import typing_extensions # noqa: F401 + +from launch.api_client import schemas # noqa: F401 + + +class WebSearchOptions( + schemas.DictSchema +): + """NOTE: This class is auto generated by OpenAPI Generator. 
+ Ref: https://openapi-generator.tech + + Do not edit the class manually. + """ + + + class MetaOapg: + + class properties: + + @staticmethod + def user_location() -> typing.Type['UserLocation']: + return UserLocation + + @staticmethod + def search_context_size() -> typing.Type['WebSearchContextSize']: + return WebSearchContextSize + __annotations__ = { + "user_location": user_location, + "search_context_size": search_context_size, + } + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["user_location"]) -> 'UserLocation': ... + + @typing.overload + def __getitem__(self, name: typing_extensions.Literal["search_context_size"]) -> 'WebSearchContextSize': ... + + @typing.overload + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["user_location", "search_context_size", ], str]): + # dict_instance[name] accessor + return super().__getitem__(name) + + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["user_location"]) -> typing.Union['UserLocation', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: typing_extensions.Literal["search_context_size"]) -> typing.Union['WebSearchContextSize', schemas.Unset]: ... + + @typing.overload + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["user_location", "search_context_size", ], str]): + return super().get_item_oapg(name) + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, ], + user_location: typing.Union['UserLocation', schemas.Unset] = schemas.unset, + search_context_size: typing.Union['WebSearchContextSize', schemas.Unset] = schemas.unset, + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'WebSearchOptions': + return super().__new__( + cls, + *_args, + user_location=user_location, + search_context_size=search_context_size, + _configuration=_configuration, + **kwargs, + ) + +from launch.api_client.model.user_location import UserLocation +from launch.api_client.model.web_search_context_size import ( + WebSearchContextSize, +) diff --git a/launch/api_client/model/zip_artifact_flavor.py b/launch/api_client/model/zip_artifact_flavor.py index 79286ebc..f2ade7df 100644 --- a/launch/api_client/model/zip_artifact_flavor.py +++ b/launch/api_client/model/zip_artifact_flavor.py @@ -23,7 +23,9 @@ from launch.api_client import schemas # noqa: F401 -class ZipArtifactFlavor(schemas.DictSchema): +class ZipArtifactFlavor( + schemas.DictSchema +): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -32,6 +34,7 @@ class ZipArtifactFlavor(schemas.DictSchema): This is the entity-layer class for the Model Bundle flavor of a zip artifact. 
""" + class MetaOapg: required = { "flavor", @@ -41,22 +44,40 @@ class MetaOapg: "location", "load_predict_fn_module_path", } - + class properties: - class flavor(schemas.EnumBase, schemas.StrSchema): + + + class requirements( + schemas.ListSchema + ): + + class MetaOapg: - enum_value_to_name = { - "zip_artifact": "ZIP_ARTIFACT", - } - - @schemas.classproperty - def ZIP_ARTIFACT(cls): - return cls("zip_artifact") - + items = schemas.StrSchema + + def __new__( + cls, + _arg: typing.Union[typing.Tuple[typing.Union[MetaOapg.items, str, ]], typing.List[typing.Union[MetaOapg.items, str, ]]], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'requirements': + return super().__new__( + cls, + _arg, + _configuration=_configuration, + ) + + def __getitem__(self, i: int) -> MetaOapg.items: + return super().__getitem__(i) + + class framework( schemas.ComposedSchema, ): + + class MetaOapg: + @classmethod @functools.lru_cache() def one_of(cls): @@ -72,281 +93,159 @@ def one_of(cls): TensorflowFramework, CustomFramework, ] - + + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "framework": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'framework': return super().__new__( cls, *_args, _configuration=_configuration, 
**kwargs, ) - - load_model_fn_module_path = schemas.StrSchema - load_predict_fn_module_path = schemas.StrSchema location = schemas.StrSchema - - class requirements(schemas.ListSchema): + + + class flavor( + schemas.EnumBase, + schemas.StrSchema + ): + + class MetaOapg: - items = schemas.StrSchema - + enum_value_to_name = { + "zip_artifact": "ZIP_ARTIFACT", + } + + @schemas.classproperty + def ZIP_ARTIFACT(cls): + return cls("zip_artifact") + load_predict_fn_module_path = schemas.StrSchema + load_model_fn_module_path = schemas.StrSchema + + + class app_config( + schemas.DictBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneFrozenDictMixin + ): + + + class MetaOapg: + additional_properties = schemas.AnyTypeSchema + + + def __getitem__(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + # dict_instance[name] accessor + return super().__getitem__(name) + + def get_item_oapg(self, name: typing.Union[str, ]) -> MetaOapg.additional_properties: + return super().get_item_oapg(name) + def __new__( cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], + *_args: typing.Union[dict, frozendict.frozendict, None, ], _configuration: typing.Optional[schemas.Configuration] = None, - ) -> "requirements": + **kwargs: typing.Union[MetaOapg.additional_properties, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + ) -> 'app_config': return super().__new__( cls, - _arg, + *_args, _configuration=_configuration, + **kwargs, ) - - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - - app_config = schemas.DictSchema __annotations__ = { - "flavor": flavor, + "requirements": requirements, "framework": framework, - "load_model_fn_module_path": load_model_fn_module_path, - "load_predict_fn_module_path": 
load_predict_fn_module_path, "location": location, - "requirements": requirements, + "flavor": flavor, + "load_predict_fn_module_path": load_predict_fn_module_path, + "load_model_fn_module_path": load_model_fn_module_path, "app_config": app_config, } - + flavor: MetaOapg.properties.flavor requirements: MetaOapg.properties.requirements framework: MetaOapg.properties.framework load_model_fn_module_path: MetaOapg.properties.load_model_fn_module_path location: MetaOapg.properties.location load_predict_fn_module_path: MetaOapg.properties.load_predict_fn_module_path - + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: - ... - + def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["load_model_fn_module_path"] - ) -> MetaOapg.properties.load_model_fn_module_path: - ... - + def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... + @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["load_predict_fn_module_path"] - ) -> MetaOapg.properties.load_predict_fn_module_path: - ... - + def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def __getitem__(self, name: typing_extensions.Literal["load_predict_fn_module_path"]) -> MetaOapg.properties.load_predict_fn_module_path: ... 
+ @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def __getitem__(self, name: typing_extensions.Literal["load_model_fn_module_path"]) -> MetaOapg.properties.load_model_fn_module_path: ... + @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: - ... - + def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... + @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: - ... - - def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn_module_path", - "load_predict_fn_module_path", - "location", - "requirements", - "app_config", - ], - str, - ], - ): + def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... + + def __getitem__(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn_module_path", "load_model_fn_module_path", "app_config", ], str]): # dict_instance[name] accessor return super().__getitem__(name) - + + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["load_model_fn_module_path"] - ) -> MetaOapg.properties.load_model_fn_module_path: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... 
+ @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["load_predict_fn_module_path"] - ) -> MetaOapg.properties.load_predict_fn_module_path: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["load_predict_fn_module_path"]) -> MetaOapg.properties.load_predict_fn_module_path: ... + @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["load_model_fn_module_path"]) -> MetaOapg.properties.load_model_fn_module_path: ... + @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: - ... - + def get_item_oapg(self, name: typing_extensions.Literal["app_config"]) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... + @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: - ... - - def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn_module_path", - "load_predict_fn_module_path", - "location", - "requirements", - "app_config", - ], - str, - ], - ): + def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
+ + def get_item_oapg(self, name: typing.Union[typing_extensions.Literal["requirements", "framework", "location", "flavor", "load_predict_fn_module_path", "load_model_fn_module_path", "app_config", ], str]): return super().get_item_oapg(name) + def __new__( cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - framework: typing.Union[ - MetaOapg.properties.framework, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - load_model_fn_module_path: typing.Union[ - MetaOapg.properties.load_model_fn_module_path, - str, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - load_predict_fn_module_path: typing.Union[ - MetaOapg.properties.load_predict_fn_module_path, - str, - ], - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, + *_args: typing.Union[dict, frozendict.frozendict, ], + flavor: typing.Union[MetaOapg.properties.flavor, str, ], + requirements: typing.Union[MetaOapg.properties.requirements, list, tuple, ], + framework: typing.Union[MetaOapg.properties.framework, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + load_model_fn_module_path: typing.Union[MetaOapg.properties.load_model_fn_module_path, str, ], + location: typing.Union[MetaOapg.properties.location, str, ], + load_predict_fn_module_path: typing.Union[MetaOapg.properties.load_predict_fn_module_path, str, ], + app_config: typing.Union[MetaOapg.properties.app_config, dict, frozendict.frozendict, None, schemas.Unset] = schemas.unset, _configuration: typing.Optional[schemas.Configuration] = None, - 
**kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ZipArtifactFlavor": + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'ZipArtifactFlavor': return super().__new__( cls, *_args, @@ -361,7 +260,6 @@ def __new__( **kwargs, ) - from launch.api_client.model.custom_framework import CustomFramework from launch.api_client.model.pytorch_framework import PytorchFramework from launch.api_client.model.tensorflow_framework import TensorflowFramework diff --git a/launch/api_client/model/zip_artifact_flavor.pyi b/launch/api_client/model/zip_artifact_flavor.pyi deleted file mode 100644 index ac4b9816..00000000 --- a/launch/api_client/model/zip_artifact_flavor.pyi +++ /dev/null @@ -1,319 +0,0 @@ -# coding: utf-8 - -""" - launch - - No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 - - The version of the OpenAPI document: 1.0.0 - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -from launch_client import schemas # noqa: F401 - -class ZipArtifactFlavor(schemas.DictSchema): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - This is the entity-layer class for the Model Bundle flavor of a zip artifact. 
- """ - - class MetaOapg: - required = { - "flavor", - "requirements", - "framework", - "load_model_fn_module_path", - "location", - "load_predict_fn_module_path", - } - - class properties: - class flavor(schemas.EnumBase, schemas.StrSchema): - @schemas.classproperty - def ZIP_ARTIFACT(cls): - return cls("zip_artifact") - - class framework( - schemas.ComposedSchema, - ): - class MetaOapg: - @classmethod - @functools.lru_cache() - def one_of(cls): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - return [ - PytorchFramework, - TensorflowFramework, - CustomFramework, - ] - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "framework": - return super().__new__( - cls, - *_args, - _configuration=_configuration, - **kwargs, - ) - load_model_fn_module_path = schemas.StrSchema - load_predict_fn_module_path = schemas.StrSchema - location = schemas.StrSchema - - class requirements(schemas.ListSchema): - class MetaOapg: - items = schemas.StrSchema - def __new__( - cls, - _arg: typing.Union[ - typing.Tuple[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - typing.List[ - typing.Union[ - MetaOapg.items, - str, - ] - ], - ], - _configuration: typing.Optional[schemas.Configuration] = None, - ) -> 
"requirements": - return super().__new__( - cls, - _arg, - _configuration=_configuration, - ) - def __getitem__(self, i: int) -> MetaOapg.items: - return super().__getitem__(i) - app_config = schemas.DictSchema - __annotations__ = { - "flavor": flavor, - "framework": framework, - "load_model_fn_module_path": load_model_fn_module_path, - "load_predict_fn_module_path": load_predict_fn_module_path, - "location": location, - "requirements": requirements, - "app_config": app_config, - } - flavor: MetaOapg.properties.flavor - requirements: MetaOapg.properties.requirements - framework: MetaOapg.properties.framework - load_model_fn_module_path: MetaOapg.properties.load_model_fn_module_path - location: MetaOapg.properties.location - load_predict_fn_module_path: MetaOapg.properties.load_predict_fn_module_path - - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["load_model_fn_module_path"] - ) -> MetaOapg.properties.load_model_fn_module_path: ... - @typing.overload - def __getitem__( - self, name: typing_extensions.Literal["load_predict_fn_module_path"] - ) -> MetaOapg.properties.load_predict_fn_module_path: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def __getitem__(self, name: typing_extensions.Literal["app_config"]) -> MetaOapg.properties.app_config: ... - @typing.overload - def __getitem__(self, name: str) -> schemas.UnsetAnyTypeSchema: ... 
- def __getitem__( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn_module_path", - "load_predict_fn_module_path", - "location", - "requirements", - "app_config", - ], - str, - ], - ): - # dict_instance[name] accessor - return super().__getitem__(name) - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["flavor"]) -> MetaOapg.properties.flavor: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["framework"]) -> MetaOapg.properties.framework: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["load_model_fn_module_path"] - ) -> MetaOapg.properties.load_model_fn_module_path: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["load_predict_fn_module_path"] - ) -> MetaOapg.properties.load_predict_fn_module_path: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["location"]) -> MetaOapg.properties.location: ... - @typing.overload - def get_item_oapg(self, name: typing_extensions.Literal["requirements"]) -> MetaOapg.properties.requirements: ... - @typing.overload - def get_item_oapg( - self, name: typing_extensions.Literal["app_config"] - ) -> typing.Union[MetaOapg.properties.app_config, schemas.Unset]: ... - @typing.overload - def get_item_oapg(self, name: str) -> typing.Union[schemas.UnsetAnyTypeSchema, schemas.Unset]: ... 
- def get_item_oapg( - self, - name: typing.Union[ - typing_extensions.Literal[ - "flavor", - "framework", - "load_model_fn_module_path", - "load_predict_fn_module_path", - "location", - "requirements", - "app_config", - ], - str, - ], - ): - return super().get_item_oapg(name) - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - ], - flavor: typing.Union[ - MetaOapg.properties.flavor, - str, - ], - requirements: typing.Union[ - MetaOapg.properties.requirements, - list, - tuple, - ], - framework: typing.Union[ - MetaOapg.properties.framework, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - bool, - None, - list, - tuple, - bytes, - io.FileIO, - io.BufferedReader, - ], - load_model_fn_module_path: typing.Union[ - MetaOapg.properties.load_model_fn_module_path, - str, - ], - location: typing.Union[ - MetaOapg.properties.location, - str, - ], - load_predict_fn_module_path: typing.Union[ - MetaOapg.properties.load_predict_fn_module_path, - str, - ], - app_config: typing.Union[ - MetaOapg.properties.app_config, dict, frozendict.frozendict, schemas.Unset - ] = schemas.unset, - _configuration: typing.Optional[schemas.Configuration] = None, - **kwargs: typing.Union[ - schemas.AnyTypeSchema, - dict, - frozendict.frozendict, - str, - date, - datetime, - uuid.UUID, - int, - float, - decimal.Decimal, - None, - list, - tuple, - bytes, - ], - ) -> "ZipArtifactFlavor": - return super().__new__( - cls, - *_args, - flavor=flavor, - requirements=requirements, - framework=framework, - load_model_fn_module_path=load_model_fn_module_path, - location=location, - load_predict_fn_module_path=load_predict_fn_module_path, - app_config=app_config, - _configuration=_configuration, - **kwargs, - ) - -from launch_client.model.custom_framework import CustomFramework -from launch_client.model.pytorch_framework import PytorchFramework -from launch_client.model.tensorflow_framework import TensorflowFramework diff 
--git a/launch/api_client/models/__init__.py b/launch/api_client/models/__init__.py index 6d8aeb39..5a993880 100644 --- a/launch/api_client/models/__init__.py +++ b/launch/api_client/models/__init__.py @@ -11,19 +11,124 @@ # import sys # sys.setrecursionlimit(n) +from launch.api_client.model.annotation import Annotation +from launch.api_client.model.audio import Audio +from launch.api_client.model.audio1 import Audio1 +from launch.api_client.model.audio2 import Audio2 +from launch.api_client.model.batch_completions_job import BatchCompletionsJob +from launch.api_client.model.batch_completions_job_status import ( + BatchCompletionsJobStatus, +) +from launch.api_client.model.batch_completions_model_config import ( + BatchCompletionsModelConfig, +) from launch.api_client.model.batch_job_serialization_format import ( BatchJobSerializationFormat, ) from launch.api_client.model.batch_job_status import BatchJobStatus -from launch.api_client.model.body_upload_file_v1_files_post import ( - BodyUploadFileV1FilesPost, -) from launch.api_client.model.callback_auth import CallbackAuth from launch.api_client.model.callback_basic_auth import CallbackBasicAuth from launch.api_client.model.callbackm_tls_auth import CallbackmTLSAuth +from launch.api_client.model.cancel_batch_completions_v2_response import ( + CancelBatchCompletionsV2Response, +) from launch.api_client.model.cancel_fine_tune_response import ( CancelFineTuneResponse, ) +from launch.api_client.model.chat_completion_function_call_option import ( + ChatCompletionFunctionCallOption, +) +from launch.api_client.model.chat_completion_functions import ( + ChatCompletionFunctions, +) +from launch.api_client.model.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall, +) +from launch.api_client.model.chat_completion_message_tool_call_chunk import ( + ChatCompletionMessageToolCallChunk, +) +from launch.api_client.model.chat_completion_message_tool_calls_input import ( + ChatCompletionMessageToolCallsInput, 
+) +from launch.api_client.model.chat_completion_message_tool_calls_output import ( + ChatCompletionMessageToolCallsOutput, +) +from launch.api_client.model.chat_completion_named_tool_choice import ( + ChatCompletionNamedToolChoice, +) +from launch.api_client.model.chat_completion_request_assistant_message import ( + ChatCompletionRequestAssistantMessage, +) +from launch.api_client.model.chat_completion_request_assistant_message_content_part import ( + ChatCompletionRequestAssistantMessageContentPart, +) +from launch.api_client.model.chat_completion_request_developer_message import ( + ChatCompletionRequestDeveloperMessage, +) +from launch.api_client.model.chat_completion_request_function_message import ( + ChatCompletionRequestFunctionMessage, +) +from launch.api_client.model.chat_completion_request_message import ( + ChatCompletionRequestMessage, +) +from launch.api_client.model.chat_completion_request_message_content_part_audio import ( + ChatCompletionRequestMessageContentPartAudio, +) +from launch.api_client.model.chat_completion_request_message_content_part_file import ( + ChatCompletionRequestMessageContentPartFile, +) +from launch.api_client.model.chat_completion_request_message_content_part_image import ( + ChatCompletionRequestMessageContentPartImage, +) +from launch.api_client.model.chat_completion_request_message_content_part_refusal import ( + ChatCompletionRequestMessageContentPartRefusal, +) +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) +from launch.api_client.model.chat_completion_request_system_message import ( + ChatCompletionRequestSystemMessage, +) +from launch.api_client.model.chat_completion_request_system_message_content_part import ( + ChatCompletionRequestSystemMessageContentPart, +) +from launch.api_client.model.chat_completion_request_tool_message import ( + ChatCompletionRequestToolMessage, +) +from 
launch.api_client.model.chat_completion_request_tool_message_content_part import ( + ChatCompletionRequestToolMessageContentPart, +) +from launch.api_client.model.chat_completion_request_user_message import ( + ChatCompletionRequestUserMessage, +) +from launch.api_client.model.chat_completion_request_user_message_content_part import ( + ChatCompletionRequestUserMessageContentPart, +) +from launch.api_client.model.chat_completion_response_message import ( + ChatCompletionResponseMessage, +) +from launch.api_client.model.chat_completion_stream_options import ( + ChatCompletionStreamOptions, +) +from launch.api_client.model.chat_completion_stream_response_delta import ( + ChatCompletionStreamResponseDelta, +) +from launch.api_client.model.chat_completion_token_logprob import ( + ChatCompletionTokenLogprob, +) +from launch.api_client.model.chat_completion_tool import ChatCompletionTool +from launch.api_client.model.chat_completion_tool_choice_option import ( + ChatCompletionToolChoiceOption, +) +from launch.api_client.model.chat_completion_v2_request import ( + ChatCompletionV2Request, +) +from launch.api_client.model.chat_completion_v2_stream_error_chunk import ( + ChatCompletionV2StreamErrorChunk, +) +from launch.api_client.model.choice import Choice +from launch.api_client.model.choice1 import Choice1 +from launch.api_client.model.choice2 import Choice2 from launch.api_client.model.clone_model_bundle_v1_request import ( CloneModelBundleV1Request, ) @@ -49,20 +154,37 @@ from launch.api_client.model.completion_sync_v1_response import ( CompletionSyncV1Response, ) +from launch.api_client.model.completion_tokens_details import ( + CompletionTokensDetails, +) +from launch.api_client.model.completion_usage import CompletionUsage +from launch.api_client.model.completion_v2_request import CompletionV2Request +from launch.api_client.model.completion_v2_stream_error_chunk import ( + CompletionV2StreamErrorChunk, +) +from launch.api_client.model.content import Content +from 
launch.api_client.model.content1 import Content1 +from launch.api_client.model.content2 import Content2 +from launch.api_client.model.content3 import Content3 +from launch.api_client.model.content4 import Content4 +from launch.api_client.model.content8 import Content8 from launch.api_client.model.create_async_task_v1_response import ( CreateAsyncTaskV1Response, ) -from launch.api_client.model.create_batch_completions_model_config import ( - CreateBatchCompletionsModelConfig, +from launch.api_client.model.create_batch_completions_v1_model_config import ( + CreateBatchCompletionsV1ModelConfig, ) -from launch.api_client.model.create_batch_completions_request import ( - CreateBatchCompletionsRequest, +from launch.api_client.model.create_batch_completions_v1_request import ( + CreateBatchCompletionsV1Request, ) -from launch.api_client.model.create_batch_completions_request_content import ( - CreateBatchCompletionsRequestContent, +from launch.api_client.model.create_batch_completions_v1_request_content import ( + CreateBatchCompletionsV1RequestContent, ) -from launch.api_client.model.create_batch_completions_response import ( - CreateBatchCompletionsResponse, +from launch.api_client.model.create_batch_completions_v1_response import ( + CreateBatchCompletionsV1Response, +) +from launch.api_client.model.create_batch_completions_v2_request import ( + CreateBatchCompletionsV2Request, ) from launch.api_client.model.create_batch_job_resource_requests import ( CreateBatchJobResourceRequests, @@ -73,6 +195,18 @@ from launch.api_client.model.create_batch_job_v1_response import ( CreateBatchJobV1Response, ) +from launch.api_client.model.create_chat_completion_response import ( + CreateChatCompletionResponse, +) +from launch.api_client.model.create_chat_completion_stream_response import ( + CreateChatCompletionStreamResponse, +) +from launch.api_client.model.create_completion_response import ( + CreateCompletionResponse, +) +from 
launch.api_client.model.create_deep_speed_model_endpoint_request import ( + CreateDeepSpeedModelEndpointRequest, +) from launch.api_client.model.create_docker_image_batch_job_bundle_v1_request import ( CreateDockerImageBatchJobBundleV1Request, ) @@ -94,6 +228,9 @@ from launch.api_client.model.create_fine_tune_response import ( CreateFineTuneResponse, ) +from launch.api_client.model.create_light_llm_model_endpoint_request import ( + CreateLightLLMModelEndpointRequest, +) from launch.api_client.model.create_llm_model_endpoint_v1_request import ( CreateLLMModelEndpointV1Request, ) @@ -118,12 +255,24 @@ from launch.api_client.model.create_model_endpoint_v1_response import ( CreateModelEndpointV1Response, ) +from launch.api_client.model.create_sg_lang_model_endpoint_request import ( + CreateSGLangModelEndpointRequest, +) +from launch.api_client.model.create_tensor_rtllm_model_endpoint_request import ( + CreateTensorRTLLMModelEndpointRequest, +) +from launch.api_client.model.create_text_generation_inference_model_endpoint_request import ( + CreateTextGenerationInferenceModelEndpointRequest, +) from launch.api_client.model.create_trigger_v1_request import ( CreateTriggerV1Request, ) from launch.api_client.model.create_trigger_v1_response import ( CreateTriggerV1Response, ) +from launch.api_client.model.create_vllm_model_endpoint_request import ( + CreateVLLMModelEndpointRequest, +) from launch.api_client.model.custom_framework import CustomFramework from launch.api_client.model.delete_file_response import DeleteFileResponse from launch.api_client.model.delete_llm_endpoint_response import ( @@ -142,9 +291,26 @@ from launch.api_client.model.endpoint_predict_v1_request import ( EndpointPredictV1Request, ) +from launch.api_client.model.file import File +from launch.api_client.model.filtered_chat_completion_v2_request import ( + FilteredChatCompletionV2Request, +) +from launch.api_client.model.filtered_completion_v2_request import ( + FilteredCompletionV2Request, +) +from 
launch.api_client.model.function1 import Function1 +from launch.api_client.model.function2 import Function2 +from launch.api_client.model.function3 import Function3 +from launch.api_client.model.function_call import FunctionCall +from launch.api_client.model.function_call2 import FunctionCall2 +from launch.api_client.model.function_object import FunctionObject +from launch.api_client.model.function_parameters import FunctionParameters from launch.api_client.model.get_async_task_v1_response import ( GetAsyncTaskV1Response, ) +from launch.api_client.model.get_batch_completion_v2_response import ( + GetBatchCompletionV2Response, +) from launch.api_client.model.get_batch_job_v1_response import ( GetBatchJobV1Response, ) @@ -170,6 +336,9 @@ ) from launch.api_client.model.gpu_type import GpuType from launch.api_client.model.http_validation_error import HTTPValidationError +from launch.api_client.model.image_url import ImageUrl +from launch.api_client.model.input_audio import InputAudio +from launch.api_client.model.json_schema import JsonSchema from launch.api_client.model.list_docker_image_batch_job_bundle_v1_response import ( ListDockerImageBatchJobBundleV1Response, ) @@ -200,6 +369,9 @@ LLMInferenceFramework, ) from launch.api_client.model.llm_source import LLMSource +from launch.api_client.model.logprobs import Logprobs +from launch.api_client.model.logprobs2 import Logprobs2 +from launch.api_client.model.metadata import Metadata from launch.api_client.model.model_bundle_environment_params import ( ModelBundleEnvironmentParams, ) @@ -231,11 +403,35 @@ ) from launch.api_client.model.model_endpoint_status import ModelEndpointStatus from launch.api_client.model.model_endpoint_type import ModelEndpointType +from launch.api_client.model.parallel_tool_calls import ParallelToolCalls +from launch.api_client.model.prediction_content import PredictionContent +from launch.api_client.model.prompt import Prompt +from launch.api_client.model.prompt1 import Prompt1 +from 
launch.api_client.model.prompt1_item import Prompt1Item +from launch.api_client.model.prompt_tokens_details import PromptTokensDetails from launch.api_client.model.pytorch_framework import PytorchFramework from launch.api_client.model.quantization import Quantization +from launch.api_client.model.reasoning_effort import ReasoningEffort from launch.api_client.model.request_schema import RequestSchema +from launch.api_client.model.response_format_json_object import ( + ResponseFormatJsonObject, +) +from launch.api_client.model.response_format_json_schema import ( + ResponseFormatJsonSchema, +) +from launch.api_client.model.response_format_json_schema_schema import ( + ResponseFormatJsonSchemaSchema, +) +from launch.api_client.model.response_format_text import ResponseFormatText +from launch.api_client.model.response_modalities import ResponseModalities from launch.api_client.model.response_schema import ResponseSchema +from launch.api_client.model.restart_model_endpoint_v1_response import ( + RestartModelEndpointV1Response, +) from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor +from launch.api_client.model.service_tier import ServiceTier +from launch.api_client.model.stop_configuration import StopConfiguration +from launch.api_client.model.stop_configuration1 import StopConfiguration1 from launch.api_client.model.stream_error import StreamError from launch.api_client.model.stream_error_content import StreamErrorContent from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( @@ -251,15 +447,25 @@ from launch.api_client.model.tensorflow_framework import TensorflowFramework from launch.api_client.model.token_output import TokenOutput from launch.api_client.model.tool_config import ToolConfig +from launch.api_client.model.top_logprob import TopLogprob from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( TritonEnhancedRunnableImageFlavor, ) +from 
launch.api_client.model.update_batch_completions_v2_request import ( + UpdateBatchCompletionsV2Request, +) +from launch.api_client.model.update_batch_completions_v2_response import ( + UpdateBatchCompletionsV2Response, +) from launch.api_client.model.update_batch_job_v1_request import ( UpdateBatchJobV1Request, ) from launch.api_client.model.update_batch_job_v1_response import ( UpdateBatchJobV1Response, ) +from launch.api_client.model.update_deep_speed_model_endpoint_request import ( + UpdateDeepSpeedModelEndpointRequest, +) from launch.api_client.model.update_docker_image_batch_job_v1_request import ( UpdateDockerImageBatchJobV1Request, ) @@ -278,12 +484,29 @@ from launch.api_client.model.update_model_endpoint_v1_response import ( UpdateModelEndpointV1Response, ) +from launch.api_client.model.update_sg_lang_model_endpoint_request import ( + UpdateSGLangModelEndpointRequest, +) +from launch.api_client.model.update_text_generation_inference_model_endpoint_request import ( + UpdateTextGenerationInferenceModelEndpointRequest, +) from launch.api_client.model.update_trigger_v1_request import ( UpdateTriggerV1Request, ) from launch.api_client.model.update_trigger_v1_response import ( UpdateTriggerV1Response, ) +from launch.api_client.model.update_vllm_model_endpoint_request import ( + UpdateVLLMModelEndpointRequest, +) from launch.api_client.model.upload_file_response import UploadFileResponse +from launch.api_client.model.url_citation import UrlCitation +from launch.api_client.model.user_location import UserLocation from launch.api_client.model.validation_error import ValidationError +from launch.api_client.model.voice_ids_shared import VoiceIdsShared +from launch.api_client.model.web_search_context_size import ( + WebSearchContextSize, +) +from launch.api_client.model.web_search_location import WebSearchLocation +from launch.api_client.model.web_search_options import WebSearchOptions from launch.api_client.model.zip_artifact_flavor import ZipArtifactFlavor diff --git 
a/launch/api_client/paths/__init__.py b/launch/api_client/paths/__init__.py index 0d06ae60..460b9fd4 100644 --- a/launch/api_client/paths/__init__.py +++ b/launch/api_client/paths/__init__.py @@ -6,46 +6,50 @@ class PathValues(str, enum.Enum): - HEALTHCHECK = "/healthcheck" - HEALTHZ = "/healthz" - READYZ = "/readyz" - V1_ASYNCTASKS = "/v1/async-tasks" - V1_ASYNCTASKS_TASK_ID = "/v1/async-tasks/{task_id}" V1_BATCHJOBS = "/v1/batch-jobs" V1_BATCHJOBS_BATCH_JOB_ID = "/v1/batch-jobs/{batch_job_id}" - V1_DOCKERIMAGEBATCHJOBBUNDLES = "/v1/docker-image-batch-job-bundles" - V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST = "/v1/docker-image-batch-job-bundles/latest" - V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID = ( - "/v1/docker-image-batch-job-bundles/{docker_image_batch_job_bundle_id}" - ) V1_DOCKERIMAGEBATCHJOBS = "/v1/docker-image-batch-jobs" V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID = "/v1/docker-image-batch-jobs/{batch_job_id}" - V1_FILES = "/v1/files" - V1_FILES_FILE_ID = "/v1/files/{file_id}" - V1_FILES_FILE_ID_CONTENT = "/v1/files/{file_id}/content" - V1_LLM_BATCHCOMPLETIONS = "/v1/llm/batch-completions" - V1_LLM_COMPLETIONSSTREAM = "/v1/llm/completions-stream" - V1_LLM_COMPLETIONSSYNC = "/v1/llm/completions-sync" - V1_LLM_FINETUNES = "/v1/llm/fine-tunes" - V1_LLM_FINETUNES_FINE_TUNE_ID = "/v1/llm/fine-tunes/{fine_tune_id}" - V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL = "/v1/llm/fine-tunes/{fine_tune_id}/cancel" - V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS = "/v1/llm/fine-tunes/{fine_tune_id}/events" - V1_LLM_MODELENDPOINTS = "/v1/llm/model-endpoints" - V1_LLM_MODELENDPOINTS_DOWNLOAD = "/v1/llm/model-endpoints/download" - V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME = "/v1/llm/model-endpoints/{model_endpoint_name}" + V1_ASYNCTASKS = "/v1/async-tasks" + V1_ASYNCTASKS_TASK_ID = "/v1/async-tasks/{task_id}" + V1_SYNCTASKS = "/v1/sync-tasks" + V1_STREAMINGTASKS = "/v1/streaming-tasks" V1_MODELBUNDLES = "/v1/model-bundles" V1_MODELBUNDLES_CLONEWITHCHANGES = 
"/v1/model-bundles/clone-with-changes" V1_MODELBUNDLES_LATEST = "/v1/model-bundles/latest" V1_MODELBUNDLES_MODEL_BUNDLE_ID = "/v1/model-bundles/{model_bundle_id}" - V1_MODELENDPOINTS = "/v1/model-endpoints" - V1_MODELENDPOINTSAPI = "/v1/model-endpoints-api" - V1_MODELENDPOINTSSCHEMA_JSON = "/v1/model-endpoints-schema.json" - V1_MODELENDPOINTS_MODEL_ENDPOINT_ID = "/v1/model-endpoints/{model_endpoint_id}" - V1_STREAMINGTASKS = "/v1/streaming-tasks" - V1_SYNCTASKS = "/v1/sync-tasks" - V1_TRIGGERS = "/v1/triggers" - V1_TRIGGERS_TRIGGER_ID = "/v1/triggers/{trigger_id}" V2_MODELBUNDLES = "/v2/model-bundles" V2_MODELBUNDLES_CLONEWITHCHANGES = "/v2/model-bundles/clone-with-changes" V2_MODELBUNDLES_LATEST = "/v2/model-bundles/latest" V2_MODELBUNDLES_MODEL_BUNDLE_ID = "/v2/model-bundles/{model_bundle_id}" + V1_MODELENDPOINTS = "/v1/model-endpoints" + V1_MODELENDPOINTS_MODEL_ENDPOINT_ID = "/v1/model-endpoints/{model_endpoint_id}" + V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART = "/v1/model-endpoints/{model_endpoint_id}/restart" + V1_MODELENDPOINTSSCHEMA_JSON = "/v1/model-endpoints-schema.json" + V1_MODELENDPOINTSAPI = "/v1/model-endpoints-api" + V1_DOCKERIMAGEBATCHJOBBUNDLES = "/v1/docker-image-batch-job-bundles" + V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST = "/v1/docker-image-batch-job-bundles/latest" + V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID = "/v1/docker-image-batch-job-bundles/{docker_image_batch_job_bundle_id}" + V1_LLM_MODELENDPOINTS = "/v1/llm/model-endpoints" + V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME = "/v1/llm/model-endpoints/{model_endpoint_name}" + V1_LLM_COMPLETIONSSYNC = "/v1/llm/completions-sync" + V1_LLM_COMPLETIONSSTREAM = "/v1/llm/completions-stream" + V1_LLM_FINETUNES = "/v1/llm/fine-tunes" + V1_LLM_FINETUNES_FINE_TUNE_ID = "/v1/llm/fine-tunes/{fine_tune_id}" + V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL = "/v1/llm/fine-tunes/{fine_tune_id}/cancel" + V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS = "/v1/llm/fine-tunes/{fine_tune_id}/events" + 
V1_LLM_MODELENDPOINTS_DOWNLOAD = "/v1/llm/model-endpoints/download" + V1_LLM_BATCHCOMPLETIONS = "/v1/llm/batch-completions" + V1_FILES = "/v1/files" + V1_FILES_FILE_ID = "/v1/files/{file_id}" + V1_FILES_FILE_ID_CONTENT = "/v1/files/{file_id}/content" + V1_TRIGGERS = "/v1/triggers" + V1_TRIGGERS_TRIGGER_ID = "/v1/triggers/{trigger_id}" + V2_BATCHCOMPLETIONS = "/v2/batch-completions" + V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID = "/v2/batch-completions/{batch_completion_id}" + V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL = "/v2/batch-completions/{batch_completion_id}/actions/cancel" + V2_CHAT_COMPLETIONS = "/v2/chat/completions" + V2_COMPLETIONS = "/v2/completions" + HEALTHCHECK = "/healthcheck" + HEALTHZ = "/healthz" + READYZ = "/readyz" diff --git a/launch/api_client/paths/healthcheck/__init__.py b/launch/api_client/paths/healthcheck/__init__.py index ae6dcb46..b0eea9c5 100644 --- a/launch/api_client/paths/healthcheck/__init__.py +++ b/launch/api_client/paths/healthcheck/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.HEALTHCHECK +path = PathValues.HEALTHCHECK \ No newline at end of file diff --git a/launch/api_client/paths/healthcheck/get.py b/launch/api_client/paths/healthcheck/get.py index 4cbeec4b..62989a36 100644 --- a/launch/api_client/paths/healthcheck/get.py +++ b/launch/api_client/paths/healthcheck/get.py @@ -31,20 +31,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": 
_response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -55,8 +60,9 @@ def _healthcheck_healthcheck_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _healthcheck_healthcheck_get_oapg( @@ -65,8 +71,7 @@ def _healthcheck_healthcheck_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _healthcheck_healthcheck_get_oapg( @@ -75,8 +80,10 @@ def _healthcheck_healthcheck_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _healthcheck_healthcheck_get_oapg( self, @@ -97,11 +104,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, stream=stream, timeout=timeout, @@ -117,7 +124,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -132,8 +143,9 @@ def healthcheck_healthcheck_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def healthcheck_healthcheck_get( @@ -142,8 +154,7 @@ def healthcheck_healthcheck_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def healthcheck_healthcheck_get( @@ -152,8 +163,10 @@ def healthcheck_healthcheck_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def healthcheck_healthcheck_get( self, @@ -166,7 +179,7 @@ def healthcheck_healthcheck_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -180,8 +193,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -190,8 +204,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -200,8 +213,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -214,5 +229,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/healthcheck/get.pyi b/launch/api_client/paths/healthcheck/get.pyi deleted file mode 100644 index 519a05dc..00000000 --- a/launch/api_client/paths/healthcheck/get.pyi +++ /dev/null @@ -1,189 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _healthcheck_healthcheck_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _healthcheck_healthcheck_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _healthcheck_healthcheck_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _healthcheck_healthcheck_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Healthcheck - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class HealthcheckHealthcheckGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def healthcheck_healthcheck_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def healthcheck_healthcheck_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def healthcheck_healthcheck_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def healthcheck_healthcheck_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthcheck_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthcheck_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/healthz/__init__.py b/launch/api_client/paths/healthz/__init__.py index 3253e712..b4e79532 100644 --- a/launch/api_client/paths/healthz/__init__.py +++ b/launch/api_client/paths/healthz/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.HEALTHZ +path = PathValues.HEALTHZ \ No newline at end of file diff --git a/launch/api_client/paths/healthz/get.py b/launch/api_client/paths/healthz/get.py index c71e242f..4ad812ce 100644 --- a/launch/api_client/paths/healthz/get.py +++ b/launch/api_client/paths/healthz/get.py @@ -31,20 +31,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -55,8 +60,9 @@ def _healthcheck_healthz_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _healthcheck_healthz_get_oapg( @@ -65,8 +71,7 @@ def _healthcheck_healthz_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _healthcheck_healthz_get_oapg( @@ -75,8 +80,10 @@ def _healthcheck_healthz_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _healthcheck_healthz_get_oapg( self, @@ -97,11 +104,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, stream=stream, timeout=timeout, @@ -117,7 +124,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -132,8 +143,9 @@ def healthcheck_healthz_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def healthcheck_healthz_get( @@ -142,8 +154,7 @@ def healthcheck_healthz_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def healthcheck_healthz_get( @@ -152,8 +163,10 @@ def healthcheck_healthz_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def healthcheck_healthz_get( self, @@ -166,7 +179,7 @@ def healthcheck_healthz_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -180,8 +193,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -190,8 +204,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -200,8 +213,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -214,5 +229,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/healthz/get.pyi b/launch/api_client/paths/healthz/get.pyi deleted file mode 100644 index e80889c1..00000000 --- a/launch/api_client/paths/healthz/get.pyi +++ /dev/null @@ -1,189 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _healthcheck_healthz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _healthcheck_healthz_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _healthcheck_healthz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _healthcheck_healthz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Healthcheck - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class HealthcheckHealthzGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def healthcheck_healthz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def healthcheck_healthz_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def healthcheck_healthz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def healthcheck_healthz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_healthz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/readyz/__init__.py b/launch/api_client/paths/readyz/__init__.py index 9b49ccf0..4b5a5af4 100644 --- a/launch/api_client/paths/readyz/__init__.py +++ b/launch/api_client/paths/readyz/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.READYZ +path = PathValues.READYZ \ No newline at end of file diff --git a/launch/api_client/paths/readyz/get.py b/launch/api_client/paths/readyz/get.py index c016a017..1e946596 100644 --- a/launch/api_client/paths/readyz/get.py +++ b/launch/api_client/paths/readyz/get.py @@ -31,20 +31,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -55,8 +60,9 @@ def _healthcheck_readyz_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _healthcheck_readyz_get_oapg( @@ -65,8 +71,7 @@ def _healthcheck_readyz_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _healthcheck_readyz_get_oapg( @@ -75,8 +80,10 @@ def _healthcheck_readyz_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _healthcheck_readyz_get_oapg( self, @@ -97,11 +104,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, stream=stream, timeout=timeout, @@ -117,7 +124,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -132,8 +143,9 @@ def healthcheck_readyz_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def healthcheck_readyz_get( @@ -142,8 +154,7 @@ def healthcheck_readyz_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def healthcheck_readyz_get( @@ -152,8 +163,10 @@ def healthcheck_readyz_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def healthcheck_readyz_get( self, @@ -166,7 +179,7 @@ def healthcheck_readyz_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -180,8 +193,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -190,8 +204,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -200,8 +213,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -214,5 +229,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/readyz/get.pyi b/launch/api_client/paths/readyz/get.pyi deleted file mode 100644 index 7ef0ef1c..00000000 --- a/launch/api_client/paths/readyz/get.pyi +++ /dev/null @@ -1,189 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _healthcheck_readyz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _healthcheck_readyz_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _healthcheck_readyz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _healthcheck_readyz_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Healthcheck - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class HealthcheckReadyzGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def healthcheck_readyz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def healthcheck_readyz_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def healthcheck_readyz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def healthcheck_readyz_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_readyz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._healthcheck_readyz_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_async_tasks/__init__.py b/launch/api_client/paths/v1_async_tasks/__init__.py index aafe0cc4..72df600f 100644 --- a/launch/api_client/paths/v1_async_tasks/__init__.py +++ b/launch/api_client/paths/v1_async_tasks/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_ASYNCTASKS +path = PathValues.V1_ASYNCTASKS \ No newline at end of file diff --git a/launch/api_client/paths/v1_async_tasks/post.py b/launch/api_client/paths/v1_async_tasks/post.py index 7f3c6fd2..f91beb6c 100644 --- a/launch/api_client/paths/v1_async_tasks/post.py +++ b/launch/api_client/paths/v1_async_tasks/post.py @@ -35,15 +35,17 @@ # Query params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -63,12 +65,14 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) request_body_endpoint_predict_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 
'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateAsyncTaskV1Response @@ -76,14 +80,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -92,21 +99,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -120,8 +132,9 @@ def _create_async_inference_task_v1_async_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, 
typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_async_inference_task_v1_async_tasks_post_oapg( @@ -133,8 +146,10 @@ def _create_async_inference_task_v1_async_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_async_inference_task_v1_async_tasks_post_oapg( @@ -146,8 +161,7 @@ def _create_async_inference_task_v1_async_tasks_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_async_inference_task_v1_async_tasks_post_oapg( @@ -159,13 +173,15 @@ def _create_async_inference_task_v1_async_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _create_async_inference_task_v1_async_tasks_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -182,7 +198,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_id,): + for parameter in ( + request_query_model_endpoint_id, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -192,27 +210,26 @@ class instances for serialized_value in serialized_data.values(): used_path += serialized_value - _headers = HTTPHeaderDict(self.api_client.default_headers) + _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -231,7 +248,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -249,8 +270,9 @@ def create_async_inference_task_v1_async_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_async_inference_task_v1_async_tasks_post( @@ -262,8 +284,10 @@ def create_async_inference_task_v1_async_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_async_inference_task_v1_async_tasks_post( @@ -275,8 +299,7 @@ def create_async_inference_task_v1_async_tasks_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_async_inference_task_v1_async_tasks_post( @@ -288,13 +311,15 @@ def create_async_inference_task_v1_async_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_async_inference_task_v1_async_tasks_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -308,7 +333,7 @@ def create_async_inference_task_v1_async_tasks_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -325,8 +350,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -338,8 +364,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -351,8 +379,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -364,13 +391,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -384,5 +413,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_async_tasks/post.pyi b/launch/api_client/paths/v1_async_tasks/post.pyi deleted file mode 100644 index 3e863a0d..00000000 --- a/launch/api_client/paths/v1_async_tasks/post.pyi +++ /dev/null @@ -1,344 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.create_async_task_v1_response import ( - CreateAsyncTaskV1Response, -) -from launch_client.model.endpoint_predict_v1_request import ( - EndpointPredictV1Request, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, 
RequestOptionalQueryParams): - pass - -request_query_model_endpoint_id = api_client.QueryParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointIdSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = EndpointPredictV1Request - -request_body_endpoint_predict_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateAsyncTaskV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: 
typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_async_inference_task_v1_async_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Async Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_id,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateAsyncInferenceTaskV1AsyncTasksPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_async_inference_task_v1_async_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_async_inference_task_v1_async_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_async_inference_task_v1_async_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_async_tasks_task_id/__init__.py b/launch/api_client/paths/v1_async_tasks_task_id/__init__.py index 21c8a0fc..ef09b1e6 100644 --- a/launch/api_client/paths/v1_async_tasks_task_id/__init__.py +++ b/launch/api_client/paths/v1_async_tasks_task_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = 
PathValues.V1_ASYNCTASKS_TASK_ID +path = PathValues.V1_ASYNCTASKS_TASK_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_async_tasks_task_id/get.py b/launch/api_client/paths/v1_async_tasks_task_id/get.py index 7bb49fa2..c0dec01b 100644 --- a/launch/api_client/paths/v1_async_tasks_task_id/get.py +++ b/launch/api_client/paths/v1_async_tasks_task_id/get.py @@ -32,15 +32,17 @@ # Path params TaskIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'task_id': typing.Union[TaskIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "task_id": typing.Union[ - TaskIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetAsyncTaskV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): 
@dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( @@ -115,8 +127,7 @@ def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( @@ -126,8 +137,10 @@ def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_task_id,): + for parameter in ( + request_path_task_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_async_inference_task_v1_async_tasks_task_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_async_inference_task_v1_async_tasks_task_id_get( @@ -209,8 +229,7 @@ def get_async_inference_task_v1_async_tasks_task_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_async_inference_task_v1_async_tasks_task_id_get( @@ -220,8 +239,10 @@ def get_async_inference_task_v1_async_tasks_task_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_async_inference_task_v1_async_tasks_task_id_get( self, @@ -236,7 +257,7 @@ def get_async_inference_task_v1_async_tasks_task_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_async_tasks_task_id/get.pyi b/launch/api_client/paths/v1_async_tasks_task_id/get.pyi deleted file mode 100644 index c730023f..00000000 --- a/launch/api_client/paths/v1_async_tasks_task_id/get.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_async_task_v1_response import ( - GetAsyncTaskV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -TaskIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "task_id": typing.Union[ - TaskIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - 
-request_path_task_id = api_client.PathParameter( - name="task_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TaskIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetAsyncTaskV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_async_inference_task_v1_async_tasks_task_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Async Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_task_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for 
accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetAsyncInferenceTaskV1AsyncTasksTaskIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_async_inference_task_v1_async_tasks_task_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_async_inference_task_v1_async_tasks_task_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_async_inference_task_v1_async_tasks_task_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_batch_jobs/__init__.py b/launch/api_client/paths/v1_batch_jobs/__init__.py index 3d5cef58..65e727af 100644 --- a/launch/api_client/paths/v1_batch_jobs/__init__.py +++ b/launch/api_client/paths/v1_batch_jobs/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_BATCHJOBS +path = PathValues.V1_BATCHJOBS \ No newline at end of file diff --git a/launch/api_client/paths/v1_batch_jobs/post.py b/launch/api_client/paths/v1_batch_jobs/post.py index f8769488..d87f8229 100644 --- a/launch/api_client/paths/v1_batch_jobs/post.py +++ b/launch/api_client/paths/v1_batch_jobs/post.py @@ -38,12 +38,14 @@ request_body_create_batch_job_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateBatchJobV1Response @@ 
-51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_batch_job_v1_batch_jobs_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _create_batch_job_v1_batch_jobs_post_oapg( @@ -106,8 +117,10 @@ def _create_batch_job_v1_batch_jobs_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_batch_job_v1_batch_jobs_post_oapg( @@ -118,8 +131,7 @@ def _create_batch_job_v1_batch_jobs_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_batch_job_v1_batch_jobs_post_oapg( @@ -130,13 +142,15 @@ def _create_batch_job_v1_batch_jobs_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_batch_job_v1_batch_jobs_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_batch_job_v1_batch_jobs_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_batch_job_v1_batch_jobs_post( @@ -218,8 +236,10 @@ def create_batch_job_v1_batch_jobs_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_batch_job_v1_batch_jobs_post( @@ -230,8 +250,7 @@ def create_batch_job_v1_batch_jobs_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_batch_job_v1_batch_jobs_post( @@ -242,13 +261,15 @@ def create_batch_job_v1_batch_jobs_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_batch_job_v1_batch_jobs_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_batch_job_v1_batch_jobs_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_batch_jobs/post.pyi b/launch/api_client/paths/v1_batch_jobs/post.pyi deleted file mode 100644 index 0f199580..00000000 --- a/launch/api_client/paths/v1_batch_jobs/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from 
launch_client.model.create_batch_job_v1_request import ( - CreateBatchJobV1Request, -) -from launch_client.model.create_batch_job_v1_response import ( - CreateBatchJobV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateBatchJobV1Request - -request_body_create_batch_job_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateBatchJobV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: 
typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_batch_job_v1_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, 
api_response=api_response) - - return api_response - -class CreateBatchJobV1BatchJobsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_batch_job_v1_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_batch_job_v1_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_batch_job_v1_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py b/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py index 3fa32eda..0acdf21c 100644 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py +++ b/launch/api_client/paths/v1_batch_jobs_batch_job_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_BATCHJOBS_BATCH_JOB_ID +path = PathValues.V1_BATCHJOBS_BATCH_JOB_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py b/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py index f8a0cc74..ad9a632d 100644 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py +++ b/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.py @@ -32,15 +32,17 @@ # Path params BatchJobIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_job_id': typing.Union[BatchJobIdSchema, 
str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetBatchJobV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - 
"422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( @@ -115,8 +127,7 @@ def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( @@ -126,8 +137,10 @@ def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_batch_job_id,): + for parameter in ( + request_path_batch_job_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_batch_job_v1_batch_jobs_batch_job_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_batch_job_v1_batch_jobs_batch_job_id_get( @@ -209,8 +229,7 @@ def get_batch_job_v1_batch_jobs_batch_job_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_batch_job_v1_batch_jobs_batch_job_id_get( @@ -220,8 +239,10 @@ def get_batch_job_v1_batch_jobs_batch_job_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_batch_job_v1_batch_jobs_batch_job_id_get( self, @@ -236,7 +257,7 @@ def get_batch_job_v1_batch_jobs_batch_job_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.pyi b/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.pyi deleted file mode 100644 index 2c9aba69..00000000 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/get.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_batch_job_v1_response import GetBatchJobV1Response -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - 
-request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetBatchJobV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_batch_job_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: 
- _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetBatchJobV1BatchJobsBatchJobIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_batch_job_v1_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_batch_job_v1_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py b/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py index fdd87da6..0dd75ff8 100644 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py +++ b/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.py @@ -35,15 +35,17 @@ # Path params BatchJobIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -62,12 +64,14 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): request_body_update_batch_job_v1_request = api_client.RequestBody( content={ - "application/json": 
api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = UpdateBatchJobV1Response @@ -75,14 +79,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -91,21 +98,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -119,8 +131,9 @@ def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( stream: 
bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( @@ -132,8 +145,10 @@ def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( @@ -145,8 +160,7 @@ def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( @@ -158,13 +172,15 @@ def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -181,7 +197,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_batch_job_id,): + for parameter in ( + request_path_batch_job_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -189,29 +207,28 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_update_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='put'.upper(), headers=_headers, fields=_fields, body=_body, @@ -230,7 +247,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -248,8 +269,9 @@ def update_batch_job_v1_batch_jobs_batch_job_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def update_batch_job_v1_batch_jobs_batch_job_id_put( @@ -261,8 +283,10 @@ def update_batch_job_v1_batch_jobs_batch_job_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def update_batch_job_v1_batch_jobs_batch_job_id_put( @@ -274,8 +298,7 @@ def update_batch_job_v1_batch_jobs_batch_job_id_put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def update_batch_job_v1_batch_jobs_batch_job_id_put( @@ -287,13 +310,15 @@ def update_batch_job_v1_batch_jobs_batch_job_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def update_batch_job_v1_batch_jobs_batch_job_id_put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -307,7 +332,7 @@ def update_batch_job_v1_batch_jobs_batch_job_id_put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -324,8 +349,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def put( @@ -337,8 +363,10 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def put( @@ -350,8 +378,7 @@ def put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def put( @@ -363,13 +390,15 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -383,5 +412,7 @@ def put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.pyi b/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.pyi deleted file mode 100644 index 65a06365..00000000 --- a/launch/api_client/paths/v1_batch_jobs_batch_job_id/put.pyi +++ /dev/null @@ -1,343 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: 
F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.update_batch_job_v1_request import ( - UpdateBatchJobV1Request, -) -from launch_client.model.update_batch_job_v1_response import ( - UpdateBatchJobV1Response, -) -from urllib3._collections import HTTPHeaderDict - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateBatchJobV1Request - -request_body_update_batch_job_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = UpdateBatchJobV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = 
api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_batch_job_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_update_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="put".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class UpdateBatchJobV1BatchJobsBatchJobIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def update_batch_job_v1_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_batch_job_v1_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py index dbe64811..588a7c71 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path 
= PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES +path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py index 57875b0a..6e74c613 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.py @@ -31,19 +31,39 @@ from . import path # Query params -BundleNameSchema = schemas.StrSchema + + +class BundleNameSchema( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'BundleNameSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) +RequestRequiredQueryParams = typing_extensions.TypedDict( + 'RequestRequiredQueryParams', + { + } +) RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", + 'RequestOptionalQueryParams', { - "bundle_name": typing.Union[ - BundleNameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], + 'bundle_name': typing.Union[BundleNameSchema, None, str, ], + 'order_by': typing.Union[OrderBySchema, ], }, - total=False, + total=False ) @@ -64,7 +84,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListDockerImageBatchJobBundleV1Response @@ -72,14 +93,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] 
+ body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -88,21 +112,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -114,8 +143,9 @@ def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( @@ -125,8 +155,7 @@ def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( @@ -136,8 +165,10 @@ def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( self, @@ -174,11 +205,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -195,7 +226,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -211,8 +246,9 @@ def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_ stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( @@ -222,8 +258,7 @@ def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_ accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( @@ -233,8 +268,10 @@ def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_ stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( self, @@ -249,7 +286,7 @@ def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_ accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -264,8 +301,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -275,8 +313,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -286,8 +323,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -302,5 +341,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.pyi b/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.pyi deleted file mode 100644 index b29866ce..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/get.pyi +++ /dev/null @@ -1,269 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.list_docker_image_batch_job_bundle_v1_response import ( - ListDockerImageBatchJobBundleV1Response, -) -from launch_client.model.model_bundle_order_by import ModelBundleOrderBy -from urllib3._collections import HTTPHeaderDict - -# Query params -BundleNameSchema = schemas.StrSchema -OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) -RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", - { - "bundle_name": typing.Union[ - BundleNameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], - }, - total=False, -) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_bundle_name = api_client.QueryParameter( - name="bundle_name", - style=api_client.ParameterStyle.FORM, - 
schema=BundleNameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ListDockerImageBatchJobBundleV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Docker Image Batch Job Model Bundles - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_bundle_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data 
= parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListDockerImageBatchJobModelBundlesV1DockerImageBatchJobBundlesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py index 5c500bb9..14c33e8e 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.py @@ -38,12 +38,14 @@ request_body_create_docker_image_batch_job_bundle_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = 
[ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateDockerImageBatchJobBundleV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( @@ -106,8 +117,10 @@ def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( @@ -118,8 +131,7 @@ def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( @@ -130,13 +142,15 @@ def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_docker_image_batch_job_bundle_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def 
create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( @@ -218,8 +236,10 @@ def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( @@ -230,8 +250,7 @@ def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( @@ -242,13 +261,15 @@ def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.pyi b/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.pyi deleted file mode 100644 index 7b31efbc..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.create_docker_image_batch_job_bundle_v1_request import ( - CreateDockerImageBatchJobBundleV1Request, -) -from launch_client.model.create_docker_image_batch_job_bundle_v1_response import ( - CreateDockerImageBatchJobBundleV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateDockerImageBatchJobBundleV1Request - -request_body_create_docker_image_batch_job_bundle_v1_request = api_client.RequestBody( - content={ - "application/json": 
api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateDockerImageBatchJobBundleV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Docker Image Batch Job Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_docker_image_batch_job_bundle_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateDockerImageBatchJobBundleV1DockerImageBatchJobBundlesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py index 15dcf50f..501aeb6d 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID +path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_DOCKER_IMAGE_BATCH_JOB_BUNDLE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py index 79a88ba2..5b5faa24 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.py @@ -32,15 +32,17 @@ # Path params DockerImageBatchJobBundleIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 
'docker_image_batch_job_bundle_id': typing.Union[DockerImageBatchJobBundleIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "docker_image_batch_job_bundle_id": typing.Union[ - DockerImageBatchJobBundleIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = DockerImageBatchJobBundleV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': 
api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_d stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( @@ -115,8 +127,7 @@ def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_d accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( @@ -126,8 +137,10 @@ def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_d stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_docker_image_batch_job_bundle_id,): + for parameter in ( + request_path_docker_image_batch_job_bundle_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_do stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( @@ -209,8 +229,7 @@ def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_do accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( @@ -220,8 +239,10 @@ def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_do stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( self, @@ -236,7 +257,7 @@ def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_do accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... 
+ ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.pyi b/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.pyi deleted file mode 100644 index 387e36c1..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/get.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -DockerImageBatchJobBundleIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - 
"docker_image_batch_job_bundle_id": typing.Union[ - DockerImageBatchJobBundleIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_docker_image_batch_job_bundle_id = api_client.PathParameter( - name="docker_image_batch_job_bundle_id", - style=api_client.ParameterStyle.SIMPLE, - schema=DockerImageBatchJobBundleIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = DockerImageBatchJobBundleV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Docker Image Batch Job Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_docker_image_batch_job_bundle_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, 
api_response=api_response) - - return api_response - -class GetDockerImageBatchJobModelBundleV1DockerImageBatchJobBundlesDockerImageBatchJobBundleIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py index f069fac3..07e8fff3 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST +path = PathValues.V1_DOCKERIMAGEBATCHJOBBUNDLES_LATEST \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py index ae2416da..44566b78 100644 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py +++ b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.py @@ -32,15 +32,17 @@ # Query params BundleNameSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'bundle_name': typing.Union[BundleNameSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "bundle_name": typing.Union[ - BundleNameSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = 
typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -55,7 +57,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = DockerImageBatchJobBundleV1Response @@ -63,14 +66,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -79,21 +85,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class 
BaseApi(api_client.Api): @@ -105,8 +116,9 @@ def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_ stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( @@ -116,8 +128,7 @@ def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_ accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( @@ -127,8 +138,10 @@ def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_ stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( self, @@ -148,7 +161,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_bundle_name,): + for parameter in ( + request_query_bundle_name, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -162,11 +177,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -183,7 +198,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -199,8 +218,9 @@ def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_l stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( @@ -210,8 +230,7 @@ def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_l accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... 
+ ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( @@ -221,8 +240,10 @@ def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_l stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( self, @@ -237,7 +258,7 @@ def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_l accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -252,8 +273,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -263,8 +285,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -274,8 +295,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -290,5 +313,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.pyi b/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.pyi deleted file mode 100644 index c19ad09f..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_job_bundles_latest/get.pyi +++ /dev/null @@ -1,257 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.docker_image_batch_job_bundle_v1_response import ( - DockerImageBatchJobBundleV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Query params -BundleNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "bundle_name": typing.Union[ - BundleNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_bundle_name = api_client.QueryParameter( - name="bundle_name", - style=api_client.ParameterStyle.FORM, - schema=BundleNameSchema, - required=True, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = DockerImageBatchJobBundleV1Response - -@dataclass -class 
ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Latest Docker Image Batch Job Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_bundle_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - 
stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetLatestDockerImageBatchJobBundleV1DockerImageBatchJobBundlesLatestGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py b/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py index 711b218e..d9af25cf 100644 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py +++ b/launch/api_client/paths/v1_docker_image_batch_jobs/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_DOCKERIMAGEBATCHJOBS +path = PathValues.V1_DOCKERIMAGEBATCHJOBS \ No newline at end of file diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/get.py b/launch/api_client/paths/v1_docker_image_batch_jobs/get.py index 84b5ceec..10a760d7 100644 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/get.py +++ b/launch/api_client/paths/v1_docker_image_batch_jobs/get.py @@ -30,17 +30,37 @@ from . 
import path # Query params -TriggerIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) + + +class TriggerIdSchema( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'TriggerIdSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) +RequestRequiredQueryParams = typing_extensions.TypedDict( + 'RequestRequiredQueryParams', + { + } +) RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", + 'RequestOptionalQueryParams', { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], + 'trigger_id': typing.Union[TriggerIdSchema, None, str, ], }, - total=False, + total=False ) @@ -55,7 +75,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListDockerImageBatchJobsV1Response @@ -63,14 +84,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -79,21 +103,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: 
urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -105,8 +134,9 @@ def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( @@ -116,8 +146,7 @@ def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( @@ -127,8 +156,10 @@ def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( self, @@ -148,7 +179,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_trigger_id,): + for parameter in ( + request_query_trigger_id, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -162,11 +195,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -183,7 +216,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -199,8 +236,9 @@ def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( @@ -210,8 +248,7 @@ def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... 
+ ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( @@ -221,8 +258,10 @@ def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( self, @@ -237,7 +276,7 @@ def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -252,8 +291,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -263,8 +303,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -274,8 +313,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -290,5 +331,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/get.pyi b/launch/api_client/paths/v1_docker_image_batch_jobs/get.pyi deleted file mode 100644 index b3e8e85f..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/get.pyi +++ /dev/null @@ -1,257 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.list_docker_image_batch_jobs_v1_response import ( - ListDockerImageBatchJobsV1Response, -) -from urllib3._collections import HTTPHeaderDict - -# Query params -TriggerIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) -RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", - { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], - }, - total=False, -) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_trigger_id = api_client.QueryParameter( - name="trigger_id", - style=api_client.ParameterStyle.FORM, - schema=TriggerIdSchema, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ListDockerImageBatchJobsV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: 
urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Docker Image Batch Jobs - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_trigger_id,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - 
api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListDockerImageBatchJobsV1DockerImageBatchJobsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/post.py b/launch/api_client/paths/v1_docker_image_batch_jobs/post.py index f58a57ec..efa8405f 100644 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/post.py +++ b/launch/api_client/paths/v1_docker_image_batch_jobs/post.py @@ -38,12 +38,14 @@ request_body_create_docker_image_batch_job_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateDockerImageBatchJobV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( 
response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( @@ -106,8 +117,10 @@ def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( @@ -118,8 +131,7 @@ def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( @@ -130,13 +142,15 @@ def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_docker_image_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( @@ -218,8 +236,10 @@ def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( @@ -230,8 +250,7 @@ def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( @@ -242,13 +261,15 @@ def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs/post.pyi b/launch/api_client/paths/v1_docker_image_batch_jobs/post.pyi deleted file mode 100644 index 114126f0..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.create_docker_image_batch_job_v1_request import ( - CreateDockerImageBatchJobV1Request, -) -from launch_client.model.create_docker_image_batch_job_v1_response import ( - CreateDockerImageBatchJobV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateDockerImageBatchJobV1Request - -request_body_create_docker_image_batch_job_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) 
-SchemaFor200ResponseBodyApplicationJson = CreateDockerImageBatchJobV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Docker Image Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_docker_image_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateDockerImageBatchJobV1DockerImageBatchJobsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_docker_image_batch_job_v1_docker_image_batch_jobs_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_docker_image_batch_job_v1_docker_image_batch_jobs_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py index 6411bc8a..c085c391 100644 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py +++ b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID +path = PathValues.V1_DOCKERIMAGEBATCHJOBS_BATCH_JOB_ID \ No newline at end of file diff --git 
a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py index 515e75e9..0b6de78f 100644 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py +++ b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.py @@ -32,15 +32,17 @@ # Path params BatchJobIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetDockerImageBatchJobV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class 
ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( @@ -115,8 +127,7 @@ def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( @@ -126,8 +137,10 @@ def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_batch_job_id,): + for parameter in ( + request_path_batch_job_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( stream: bool = False, 
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( @@ -209,8 +229,7 @@ def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( @@ -220,8 +239,10 @@ def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( self, @@ -236,7 +257,7 @@ def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.pyi b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.pyi deleted file mode 100644 index 283dc5b5..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/get.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_docker_image_batch_job_v1_response import ( - GetDockerImageBatchJobV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path 
params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetDockerImageBatchJobV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Docker Image Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_batch_job_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - 
_headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py index b79e3558..472dc100 100644 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py +++ b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.py @@ -35,15 +35,17 @@ # Path params BatchJobIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_job_id': typing.Union[BatchJobIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -62,12 +64,14 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): 
request_body_update_docker_image_batch_job_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = UpdateDockerImageBatchJobV1Response @@ -75,14 +79,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -91,21 +98,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', 
+) class BaseApi(api_client.Api): @@ -119,8 +131,9 @@ def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_o stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( @@ -132,8 +145,10 @@ def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_o stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( @@ -145,8 +160,7 @@ def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_o accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( @@ -158,13 +172,15 @@ def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_o stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -181,7 +197,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_batch_job_id,): + for parameter in ( + request_path_batch_job_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -189,29 +207,28 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_update_docker_image_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='put'.upper(), headers=_headers, fields=_fields, body=_body, @@ -230,7 +247,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -248,8 +269,9 @@ def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( @@ -261,8 +283,10 @@ def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( @@ -274,8 +298,7 @@ def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( @@ -287,13 +310,15 @@ def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -307,7 +332,7 @@ def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -324,8 +349,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def put( @@ -337,8 +363,10 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def put( @@ -350,8 +378,7 @@ def put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def put( @@ -363,13 +390,15 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -383,5 +412,7 @@ def put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.pyi b/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.pyi deleted file mode 100644 index b2d14223..00000000 --- a/launch/api_client/paths/v1_docker_image_batch_jobs_batch_job_id/put.pyi +++ /dev/null @@ -1,343 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.update_docker_image_batch_job_v1_request import ( - UpdateDockerImageBatchJobV1Request, -) -from launch_client.model.update_docker_image_batch_job_v1_response import ( - UpdateDockerImageBatchJobV1Response, -) -from urllib3._collections import HTTPHeaderDict - -# Path params -BatchJobIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "batch_job_id": typing.Union[ - BatchJobIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = 
typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_batch_job_id = api_client.PathParameter( - name="batch_job_id", - style=api_client.ParameterStyle.SIMPLE, - schema=BatchJobIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateDockerImageBatchJobV1Request - -request_body_update_docker_image_batch_job_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = UpdateDockerImageBatchJobV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = 
_all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Docker Image Batch Job - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_batch_job_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_update_docker_image_batch_job_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="put".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class UpdateDockerImageBatchJobV1DockerImageBatchJobsBatchJobIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_docker_image_batch_job_v1_docker_image_batch_jobs_batch_job_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_files/__init__.py b/launch/api_client/paths/v1_files/__init__.py index 36f9e0ed..3028744d 100644 --- a/launch/api_client/paths/v1_files/__init__.py +++ b/launch/api_client/paths/v1_files/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_FILES +path = PathValues.V1_FILES \ No newline at end of 
file diff --git a/launch/api_client/paths/v1_files/get.py b/launch/api_client/paths/v1_files/get.py index d55241f4..06eee942 100644 --- a/launch/api_client/paths/v1_files/get.py +++ b/launch/api_client/paths/v1_files/get.py @@ -27,7 +27,8 @@ from . import path _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListFilesResponse @@ -35,20 +36,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -59,8 +65,9 @@ def _list_files_v1_files_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_files_v1_files_get_oapg( @@ -69,8 +76,7 @@ def _list_files_v1_files_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _list_files_v1_files_get_oapg( @@ -79,8 +85,10 @@ def _list_files_v1_files_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_files_v1_files_get_oapg( self, @@ -101,11 +109,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -122,7 +130,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -137,8 +149,9 @@ def list_files_v1_files_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_files_v1_files_get( @@ -147,8 +160,7 @@ def list_files_v1_files_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_files_v1_files_get( @@ -157,8 +169,10 @@ def list_files_v1_files_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_files_v1_files_get( self, @@ -171,7 +185,7 @@ def list_files_v1_files_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -185,8 +199,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -195,8 +210,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -205,8 +219,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -219,5 +235,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_files/get.pyi b/launch/api_client/paths/v1_files/get.pyi deleted file mode 100644 index 072e39ae..00000000 --- a/launch/api_client/paths/v1_files/get.pyi +++ /dev/null @@ -1,191 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.list_files_response import ListFilesResponse -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = ListFilesResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_files_v1_files_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _list_files_v1_files_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _list_files_v1_files_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_files_v1_files_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Files - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListFilesV1FilesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_files_v1_files_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_files_v1_files_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_files_v1_files_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_files_v1_files_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_files_v1_files_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_files_v1_files_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_files/post.py b/launch/api_client/paths/v1_files/post.py index a19a76a9..85c79c7f 100644 --- a/launch/api_client/paths/v1_files/post.py +++ b/launch/api_client/paths/v1_files/post.py @@ -36,11 +36,13 @@ request_body_body = api_client.RequestBody( content={ - "multipart/form-data": api_client.MediaType(schema=SchemaForRequestBodyMultipartFormData), + 'multipart/form-data': api_client.MediaType( + schema=SchemaForRequestBodyMultipartFormData), }, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = UploadFileResponse @@ -48,14 +50,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -64,21 +69,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = 
api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -91,8 +101,9 @@ def _upload_file_v1_files_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _upload_file_v1_files_post_oapg( @@ -103,8 +114,10 @@ def _upload_file_v1_files_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _upload_file_v1_files_post_oapg( @@ -115,8 +128,7 @@ def _upload_file_v1_files_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _upload_file_v1_files_post_oapg( @@ -127,12 +139,14 @@ def _upload_file_v1_files_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _upload_file_v1_files_post_oapg( self, - content_type: str = "multipart/form-data", + content_type: str = 'multipart/form-data', body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -151,20 +165,20 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) _fields = None _body = None if body is not schemas.unset: serialized_data = request_body_body.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -183,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -200,8 +218,9 @@ def upload_file_v1_files_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def upload_file_v1_files_post( @@ -212,8 +231,10 @@ def upload_file_v1_files_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def upload_file_v1_files_post( @@ -224,8 +245,7 @@ def upload_file_v1_files_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def upload_file_v1_files_post( @@ -236,12 +256,14 @@ def upload_file_v1_files_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def upload_file_v1_files_post( self, - content_type: str = "multipart/form-data", + content_type: str = 'multipart/form-data', body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -254,7 +276,7 @@ def upload_file_v1_files_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -270,8 +292,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -282,8 +305,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -294,8 +319,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -306,12 +330,14 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, - content_type: str = "multipart/form-data", + content_type: str = 'multipart/form-data', body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -324,5 +350,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_files/post.pyi b/launch/api_client/paths/v1_files/post.pyi deleted file mode 100644 index fcc8dc82..00000000 --- a/launch/api_client/paths/v1_files/post.pyi +++ /dev/null @@ -1,286 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.body_upload_file_v1_files_post import ( - BodyUploadFileV1FilesPost, -) -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.upload_file_response import UploadFileResponse -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyMultipartFormData = BodyUploadFileV1FilesPost - -request_body_body = api_client.RequestBody( - content={ - "multipart/form-data": api_client.MediaType(schema=SchemaForRequestBodyMultipartFormData), - }, -) -SchemaFor200ResponseBodyApplicationJson = UploadFileResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - 
headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - content_type: typing_extensions.Literal["multipart/form-data"] = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _upload_file_v1_files_post_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _upload_file_v1_files_post_oapg( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _upload_file_v1_files_post_oapg( - self, - content_type: str = "multipart/form-data", - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Upload File - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - _fields = None - _body = None - if body is not schemas.unset: - serialized_data = request_body_body.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - 
elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class UploadFileV1FilesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def upload_file_v1_files_post( - self, - content_type: typing_extensions.Literal["multipart/form-data"] = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def upload_file_v1_files_post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def upload_file_v1_files_post( - self, - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def upload_file_v1_files_post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def upload_file_v1_files_post( - self, - content_type: str = "multipart/form-data", - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._upload_file_v1_files_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - content_type: typing_extensions.Literal["multipart/form-data"] = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, 
typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - content_type: str = ..., - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def post( - self, - content_type: str = "multipart/form-data", - body: typing.Union[SchemaForRequestBodyMultipartFormData, schemas.Unset] = schemas.unset, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._upload_file_v1_files_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_files_file_id/__init__.py b/launch/api_client/paths/v1_files_file_id/__init__.py index 4d2d1df3..7093c7cf 100644 --- a/launch/api_client/paths/v1_files_file_id/__init__.py +++ b/launch/api_client/paths/v1_files_file_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_FILES_FILE_ID +path = PathValues.V1_FILES_FILE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_files_file_id/delete.py b/launch/api_client/paths/v1_files_file_id/delete.py index 227a2974..d7f9ecb7 100644 --- a/launch/api_client/paths/v1_files_file_id/delete.py +++ b/launch/api_client/paths/v1_files_file_id/delete.py @@ -30,15 +30,17 @@ # Path params FileIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'file_id': typing.Union[FileIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "file_id": typing.Union[ - FileIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -52,7 +54,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - 
"HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = DeleteFileResponse @@ -60,14 +63,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -76,21 +82,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -102,8 +113,9 @@ def _delete_file_v1_files_file_id_delete_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _delete_file_v1_files_file_id_delete_oapg( @@ -113,8 +125,7 @@ def _delete_file_v1_files_file_id_delete_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _delete_file_v1_files_file_id_delete_oapg( @@ -124,8 +135,10 @@ def _delete_file_v1_files_file_id_delete_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _delete_file_v1_files_file_id_delete_oapg( self, @@ -145,7 +158,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_file_id,): + for parameter in ( + request_path_file_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -153,17 +168,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="delete".upper(), + method='delete'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -180,7 +195,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -196,8 +215,9 @@ def delete_file_v1_files_file_id_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def delete_file_v1_files_file_id_delete( @@ -207,8 +227,7 @@ def delete_file_v1_files_file_id_delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def delete_file_v1_files_file_id_delete( @@ -218,8 +237,10 @@ def delete_file_v1_files_file_id_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete_file_v1_files_file_id_delete( self, @@ -234,7 +255,7 @@ def delete_file_v1_files_file_id_delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -249,8 +270,9 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def delete( @@ -260,8 +282,7 @@ def delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def delete( @@ -271,8 +292,10 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete( self, @@ -287,5 +310,7 @@ def delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_files_file_id/delete.pyi b/launch/api_client/paths/v1_files_file_id/delete.pyi deleted file mode 100644 index 8c23e1ee..00000000 --- a/launch/api_client/paths/v1_files_file_id/delete.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.delete_file_response import DeleteFileResponse -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -FileIdSchema = schemas.StrSchema -RequestRequiredPathParams = 
typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "file_id": typing.Union[ - FileIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_file_id = api_client.PathParameter( - name="file_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FileIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = DeleteFileResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _delete_file_v1_files_file_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _delete_file_v1_files_file_id_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _delete_file_v1_files_file_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _delete_file_v1_files_file_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete File - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_file_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", 
accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="delete".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class DeleteFileV1FilesFileIdDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_file_v1_files_file_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete_file_v1_files_file_id_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def delete_file_v1_files_file_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def delete_file_v1_files_file_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_file_v1_files_file_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_file_v1_files_file_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_files_file_id/get.py b/launch/api_client/paths/v1_files_file_id/get.py index b579b79d..66559106 100644 --- a/launch/api_client/paths/v1_files_file_id/get.py +++ b/launch/api_client/paths/v1_files_file_id/get.py @@ -30,15 +30,17 @@ # Path params FileIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'file_id': typing.Union[FileIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "file_id": typing.Union[ - FileIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -52,7 +54,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetFileResponse @@ -60,14 +63,17 @@ 
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -76,21 +82,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -102,8 +113,9 @@ def _get_file_v1_files_file_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _get_file_v1_files_file_id_get_oapg( @@ -113,8 +125,7 @@ def _get_file_v1_files_file_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_file_v1_files_file_id_get_oapg( @@ -124,8 +135,10 @@ def _get_file_v1_files_file_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_file_v1_files_file_id_get_oapg( self, @@ -145,7 +158,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_file_id,): + for parameter in ( + request_path_file_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -153,17 +168,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -180,7 +195,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, 
api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -196,8 +215,9 @@ def get_file_v1_files_file_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_file_v1_files_file_id_get( @@ -207,8 +227,7 @@ def get_file_v1_files_file_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_file_v1_files_file_id_get( @@ -218,8 +237,10 @@ def get_file_v1_files_file_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_file_v1_files_file_id_get( self, @@ -234,7 +255,7 @@ def get_file_v1_files_file_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -249,8 +270,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get( @@ -260,8 +282,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -271,8 +292,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -287,5 +310,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_files_file_id/get.pyi b/launch/api_client/paths/v1_files_file_id/get.pyi deleted file mode 100644 index f1f9889c..00000000 --- a/launch/api_client/paths/v1_files_file_id/get.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_file_response import GetFileResponse -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -FileIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - 
"RequestRequiredPathParams", - { - "file_id": typing.Union[ - FileIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_file_id = api_client.PathParameter( - name="file_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FileIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetFileResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_file_v1_files_file_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_file_v1_files_file_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_file_v1_files_file_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_file_v1_files_file_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get File - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_file_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - 
response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetFileV1FilesFileIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_file_v1_files_file_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_file_v1_files_file_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_file_v1_files_file_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_file_v1_files_file_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_file_v1_files_file_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_file_v1_files_file_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_files_file_id_content/__init__.py b/launch/api_client/paths/v1_files_file_id_content/__init__.py index 481ad1b6..a0c46729 100644 --- a/launch/api_client/paths/v1_files_file_id_content/__init__.py +++ b/launch/api_client/paths/v1_files_file_id_content/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_FILES_FILE_ID_CONTENT +path = PathValues.V1_FILES_FILE_ID_CONTENT \ No newline at end of file diff --git a/launch/api_client/paths/v1_files_file_id_content/get.py b/launch/api_client/paths/v1_files_file_id_content/get.py index 1b6764ed..37a8642e 100644 --- a/launch/api_client/paths/v1_files_file_id_content/get.py +++ b/launch/api_client/paths/v1_files_file_id_content/get.py @@ -32,15 +32,17 @@ # Path params FileIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'file_id': typing.Union[FileIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "file_id": 
typing.Union[ - FileIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetFileContentResponse @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = 
("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_file_content_v1_files_file_id_content_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_file_content_v1_files_file_id_content_get_oapg( @@ -115,8 +127,7 @@ def _get_file_content_v1_files_file_id_content_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_file_content_v1_files_file_id_content_get_oapg( @@ -126,8 +137,10 @@ def _get_file_content_v1_files_file_id_content_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_file_content_v1_files_file_id_content_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_file_id,): + for parameter in ( + request_path_file_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_file_content_v1_files_file_id_content_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_file_content_v1_files_file_id_content_get( @@ -209,8 +229,7 @@ def get_file_content_v1_files_file_id_content_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_file_content_v1_files_file_id_content_get( @@ -220,8 +239,10 @@ def get_file_content_v1_files_file_id_content_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_file_content_v1_files_file_id_content_get( self, @@ -236,7 +257,7 @@ def get_file_content_v1_files_file_id_content_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_batch_completions/__init__.py b/launch/api_client/paths/v1_llm_batch_completions/__init__.py index 4103285f..6da01f4e 100644 --- a/launch/api_client/paths/v1_llm_batch_completions/__init__.py +++ b/launch/api_client/paths/v1_llm_batch_completions/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_BATCHCOMPLETIONS +path = PathValues.V1_LLM_BATCHCOMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_batch_completions/post.py b/launch/api_client/paths/v1_llm_batch_completions/post.py index 67737d35..f3823b16 100644 --- a/launch/api_client/paths/v1_llm_batch_completions/post.py +++ b/launch/api_client/paths/v1_llm_batch_completions/post.py @@ -22,43 +22,48 @@ from launch.api_client import schemas # noqa: F401 from launch.api_client import api_client, exceptions -from launch.api_client.model.create_batch_completions_request import ( - CreateBatchCompletionsRequest, +from launch.api_client.model.create_batch_completions_v1_request import ( + CreateBatchCompletionsV1Request, ) -from launch.api_client.model.create_batch_completions_response import ( - CreateBatchCompletionsResponse, +from launch.api_client.model.create_batch_completions_v1_response import ( + CreateBatchCompletionsV1Response, ) from launch.api_client.model.http_validation_error import HTTPValidationError from . 
import path # body param -SchemaForRequestBodyApplicationJson = CreateBatchCompletionsRequest +SchemaForRequestBodyApplicationJson = CreateBatchCompletionsV1Request -request_body_create_batch_completions_request = api_client.RequestBody( +request_body_create_batch_completions_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] -SchemaFor200ResponseBodyApplicationJson = CreateBatchCompletionsResponse +SchemaFor200ResponseBodyApplicationJson = CreateBatchCompletionsV1Response @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": 
_response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_batch_completions_v1_llm_batch_completions_post_oapg( @@ -106,8 +117,10 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_batch_completions_v1_llm_batch_completions_post_oapg( @@ -118,8 +131,7 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_batch_completions_v1_llm_batch_completions_post_oapg( @@ -130,13 +142,15 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _create_batch_completions_v1_llm_batch_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None - serialized_data = request_body_create_batch_completions_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + serialized_data = request_body_create_batch_completions_v1_request.serialize(body, content_type) + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ 
-206,8 +223,9 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_batch_completions_v1_llm_batch_completions_post( @@ -218,8 +236,10 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def create_batch_completions_v1_llm_batch_completions_post( @@ -230,8 +250,7 @@ def create_batch_completions_v1_llm_batch_completions_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_batch_completions_v1_llm_batch_completions_post( @@ -242,13 +261,15 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def create_batch_completions_v1_llm_batch_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_batch_completions_v1_llm_batch_completions_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_completion_sync/post.py b/launch/api_client/paths/v1_llm_completion_sync/post.py index 365882a2..ef0407a1 100644 --- a/launch/api_client/paths/v1_llm_completion_sync/post.py +++ b/launch/api_client/paths/v1_llm_completion_sync/post.py @@ -76,7 +76,9 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset @@ -92,7 +94,9 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset @@ -113,7 +117,9 @@ class BaseApi(api_client.Api): @typing.overload def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: typing_extensions.Literal["application/json"] = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -126,7 +132,9 @@ def 
_create_completion_sync_task_v1_llm_completion_sync_post_oapg( @typing.overload def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -139,7 +147,9 @@ def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( @typing.overload def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), @@ -152,7 +162,9 @@ def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( @typing.overload def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -164,7 +176,9 @@ def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = "application/json", query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -246,7 +260,9 @@ class CreateCompletionSyncTaskV1LlmCompletionSyncPost(BaseApi): @typing.overload def create_completion_sync_task_v1_llm_completion_sync_post( self, - body: 
typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: typing_extensions.Literal["application/json"] = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -259,7 +275,9 @@ def create_completion_sync_task_v1_llm_completion_sync_post( @typing.overload def create_completion_sync_task_v1_llm_completion_sync_post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -272,7 +290,9 @@ def create_completion_sync_task_v1_llm_completion_sync_post( @typing.overload def create_completion_sync_task_v1_llm_completion_sync_post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), @@ -285,7 +305,9 @@ def create_completion_sync_task_v1_llm_completion_sync_post( @typing.overload def create_completion_sync_task_v1_llm_completion_sync_post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -297,7 +319,9 @@ def create_completion_sync_task_v1_llm_completion_sync_post( def create_completion_sync_task_v1_llm_completion_sync_post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = "application/json", query_params: RequestQueryParams = 
frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -322,7 +346,9 @@ class ApiForpost(BaseApi): @typing.overload def post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: typing_extensions.Literal["application/json"] = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -335,7 +361,9 @@ def post( @typing.overload def post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -348,7 +376,9 @@ def post( @typing.overload def post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], skip_deserialization: typing_extensions.Literal[True], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), @@ -361,7 +391,9 @@ def post( @typing.overload def post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = ..., query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -373,7 +405,9 @@ def post( def post( self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], + body: typing.Union[ + SchemaForRequestBodyApplicationJson, + ], content_type: str = "application/json", query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, diff --git a/launch/api_client/paths/v1_llm_completion_sync/post.pyi b/launch/api_client/paths/v1_llm_completion_sync/post.pyi deleted file mode 
100644 index 7a09cde2..00000000 --- a/launch/api_client/paths/v1_llm_completion_sync/post.pyi +++ /dev/null @@ -1,348 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_endpoint_name = api_client.QueryParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointNameSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = CompletionSyncV1Request - -request_body_completion_sync_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CompletionSyncV1Response - -@dataclass -class 
ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_completion_sync_task_v1_llm_completion_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Completion Sync Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_completion_sync_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException( - status=response.status, - reason=response.reason, - api_response=api_response, - ) - - return api_response - -class CreateCompletionSyncTaskV1LlmCompletionSyncPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_completion_sync_task_v1_llm_completion_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completion_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completion_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_completions_stream/__init__.py b/launch/api_client/paths/v1_llm_completions_stream/__init__.py index 08046c66..b7bc2957 100644 --- a/launch/api_client/paths/v1_llm_completions_stream/__init__.py +++ b/launch/api_client/paths/v1_llm_completions_stream/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = 
PathValues.V1_LLM_COMPLETIONSSTREAM +path = PathValues.V1_LLM_COMPLETIONSSTREAM \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_completions_stream/post.py b/launch/api_client/paths/v1_llm_completions_stream/post.py index 912be32f..38643dcb 100644 --- a/launch/api_client/paths/v1_llm_completions_stream/post.py +++ b/launch/api_client/paths/v1_llm_completions_stream/post.py @@ -35,15 +35,17 @@ # Query params ModelEndpointNameSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -63,12 +65,14 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) request_body_completion_stream_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CompletionStreamV1Response @@ -76,14 +80,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( 
response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -92,21 +99,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -120,8 +132,9 @@ def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( @@ -133,8 +146,10 @@ def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( @@ -146,8 +161,7 @@ def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( @@ -159,13 +173,15 @@ def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -182,7 +198,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_name,): + for parameter in ( + request_query_model_endpoint_name, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -196,23 +214,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_completion_stream_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -231,7 +248,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -249,8 +270,9 @@ def create_completion_stream_task_v1_llm_completions_stream_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_completion_stream_task_v1_llm_completions_stream_post( @@ -262,8 +284,10 @@ def create_completion_stream_task_v1_llm_completions_stream_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_completion_stream_task_v1_llm_completions_stream_post( @@ -275,8 +299,7 @@ def create_completion_stream_task_v1_llm_completions_stream_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_completion_stream_task_v1_llm_completions_stream_post( @@ -288,13 +311,15 @@ def create_completion_stream_task_v1_llm_completions_stream_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_completion_stream_task_v1_llm_completions_stream_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -308,7 +333,7 @@ def create_completion_stream_task_v1_llm_completions_stream_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -325,8 +350,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -338,8 +364,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -351,8 +379,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -364,13 +391,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -384,5 +413,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_completions_stream/post.pyi b/launch/api_client/paths/v1_llm_completions_stream/post.pyi deleted file mode 100644 index 227316c9..00000000 --- a/launch/api_client/paths/v1_llm_completions_stream/post.pyi +++ /dev/null @@ -1,344 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.completion_stream_v1_request import ( - CompletionStreamV1Request, -) -from launch_client.model.completion_stream_v1_response import ( - CompletionStreamV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class 
RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_endpoint_name = api_client.QueryParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointNameSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = CompletionStreamV1Request - -request_body_completion_stream_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CompletionStreamV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: 
typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_completion_stream_task_v1_llm_completions_stream_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Completion Stream Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_completion_stream_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateCompletionStreamTaskV1LlmCompletionsStreamPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_completion_stream_task_v1_llm_completions_stream_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_stream_task_v1_llm_completions_stream_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_stream_task_v1_llm_completions_stream_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_completions_sync/__init__.py b/launch/api_client/paths/v1_llm_completions_sync/__init__.py index d6eabaa0..2eec9261 100644 --- a/launch/api_client/paths/v1_llm_completions_sync/__init__.py +++ b/launch/api_client/paths/v1_llm_completions_sync/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = 
PathValues.V1_LLM_COMPLETIONSSYNC +path = PathValues.V1_LLM_COMPLETIONSSYNC \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_completions_sync/post.py b/launch/api_client/paths/v1_llm_completions_sync/post.py index 3b0f7a1c..8a287bdd 100644 --- a/launch/api_client/paths/v1_llm_completions_sync/post.py +++ b/launch/api_client/paths/v1_llm_completions_sync/post.py @@ -35,15 +35,17 @@ # Query params ModelEndpointNameSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -63,12 +65,14 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) request_body_completion_sync_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CompletionSyncV1Response @@ -76,14 +80,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( 
response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -92,21 +99,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -120,8 +132,9 @@ def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( @@ -133,8 +146,10 @@ def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( @@ -146,8 +161,7 @@ def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( @@ -159,13 +173,15 @@ def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -182,7 +198,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_name,): + for parameter in ( + request_query_model_endpoint_name, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -196,23 +214,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_completion_sync_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -231,7 +248,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -249,8 +270,9 @@ def create_completion_sync_task_v1_llm_completions_sync_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_completion_sync_task_v1_llm_completions_sync_post( @@ -262,8 +284,10 @@ def create_completion_sync_task_v1_llm_completions_sync_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_completion_sync_task_v1_llm_completions_sync_post( @@ -275,8 +299,7 @@ def create_completion_sync_task_v1_llm_completions_sync_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_completion_sync_task_v1_llm_completions_sync_post( @@ -288,13 +311,15 @@ def create_completion_sync_task_v1_llm_completions_sync_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_completion_sync_task_v1_llm_completions_sync_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -308,7 +333,7 @@ def create_completion_sync_task_v1_llm_completions_sync_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -325,8 +350,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -338,8 +364,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -351,8 +379,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -364,13 +391,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -384,5 +413,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_completions_sync/post.pyi b/launch/api_client/paths/v1_llm_completions_sync/post.pyi deleted file mode 100644 index 73108176..00000000 --- a/launch/api_client/paths/v1_llm_completions_sync/post.pyi +++ /dev/null @@ -1,344 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.completion_sync_v1_request import ( - CompletionSyncV1Request, -) -from launch_client.model.completion_sync_v1_response import ( - CompletionSyncV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class 
RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_endpoint_name = api_client.QueryParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointNameSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = CompletionSyncV1Request - -request_body_completion_sync_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CompletionSyncV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, 
typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_completion_sync_task_v1_llm_completions_sync_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Completion Sync Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_completion_sync_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateCompletionSyncTaskV1LlmCompletionsSyncPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_completion_sync_task_v1_llm_completions_sync_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completions_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_completion_sync_task_v1_llm_completions_sync_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_fine_tunes/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes/__init__.py index 8a273ae3..9c6cd840 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes/__init__.py +++ b/launch/api_client/paths/v1_llm_fine_tunes/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_FINETUNES +path = 
PathValues.V1_LLM_FINETUNES \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes/get.py b/launch/api_client/paths/v1_llm_fine_tunes/get.py index 4034d2fe..ed423f7f 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes/get.py +++ b/launch/api_client/paths/v1_llm_fine_tunes/get.py @@ -29,7 +29,8 @@ from . import path _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListFineTunesResponse @@ -37,20 +38,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -61,8 +67,9 @@ def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( @@ -71,8 +78,7 @@ def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( @@ -81,8 +87,10 @@ def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( self, @@ -103,11 +111,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -124,7 +132,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -139,8 +151,9 @@ def list_fine_tunes_v1_llm_fine_tunes_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_fine_tunes_v1_llm_fine_tunes_get( @@ -149,8 +162,7 @@ def list_fine_tunes_v1_llm_fine_tunes_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... 
+ ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def list_fine_tunes_v1_llm_fine_tunes_get( @@ -159,8 +171,10 @@ def list_fine_tunes_v1_llm_fine_tunes_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_fine_tunes_v1_llm_fine_tunes_get( self, @@ -173,7 +187,7 @@ def list_fine_tunes_v1_llm_fine_tunes_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -187,8 +201,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -197,8 +212,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -207,8 +221,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -221,5 +237,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_fine_tunes/get.pyi b/launch/api_client/paths/v1_llm_fine_tunes/get.pyi deleted file mode 100644 index 55004d33..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes/get.pyi +++ /dev/null @@ -1,191 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.list_fine_tunes_response import ListFineTunesResponse -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = ListFineTunesResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_fine_tunes_v1_llm_fine_tunes_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Fine Tunes - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListFineTunesV1LlmFineTunesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_fine_tunes_v1_llm_fine_tunes_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_fine_tunes_v1_llm_fine_tunes_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_fine_tunes_v1_llm_fine_tunes_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_fine_tunes_v1_llm_fine_tunes_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_fine_tunes/post.py b/launch/api_client/paths/v1_llm_fine_tunes/post.py index 678f3b56..446835da 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes/post.py +++ b/launch/api_client/paths/v1_llm_fine_tunes/post.py @@ -38,12 +38,14 @@ request_body_create_fine_tune_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateFineTuneResponse @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + 
SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_fine_tune_v1_llm_fine_tunes_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_fine_tune_v1_llm_fine_tunes_post_oapg( @@ -106,8 +117,10 @@ def _create_fine_tune_v1_llm_fine_tunes_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_fine_tune_v1_llm_fine_tunes_post_oapg( @@ -118,8 +131,7 @@ def _create_fine_tune_v1_llm_fine_tunes_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_fine_tune_v1_llm_fine_tunes_post_oapg( @@ -130,13 +142,15 @@ def _create_fine_tune_v1_llm_fine_tunes_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_fine_tune_v1_llm_fine_tunes_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_fine_tune_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_fine_tune_v1_llm_fine_tunes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_fine_tune_v1_llm_fine_tunes_post( @@ -218,8 +236,10 @@ def create_fine_tune_v1_llm_fine_tunes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_fine_tune_v1_llm_fine_tunes_post( @@ -230,8 +250,7 @@ def create_fine_tune_v1_llm_fine_tunes_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_fine_tune_v1_llm_fine_tunes_post( @@ -242,13 +261,15 @@ def create_fine_tune_v1_llm_fine_tunes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_fine_tune_v1_llm_fine_tunes_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_fine_tune_v1_llm_fine_tunes_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_fine_tunes/post.pyi b/launch/api_client/paths/v1_llm_fine_tunes/post.pyi deleted file mode 100644 index 235f3967..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes/post.pyi +++ /dev/null @@ -1,290 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from 
launch_client import api_client, exceptions -from launch_client.model.create_fine_tune_request import CreateFineTuneRequest -from launch_client.model.create_fine_tune_response import ( - CreateFineTuneResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateFineTuneRequest - -request_body_create_fine_tune_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateFineTuneResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - 
skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_fine_tune_v1_llm_fine_tunes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Fine Tune - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_fine_tune_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, 
api_response=api_response) - - return api_response - -class CreateFineTuneV1LlmFineTunesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_fine_tune_v1_llm_fine_tunes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_fine_tune_v1_llm_fine_tunes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_fine_tune_v1_llm_fine_tunes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py index ec91d16c..32571f7e 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID +path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py index 9be6617e..f783879d 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.py @@ -30,15 +30,17 @@ # Path params FineTuneIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 
'fine_tune_id': typing.Union[FineTuneIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "fine_tune_id": typing.Union[ - FineTuneIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -52,7 +54,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetFineTuneResponse @@ -60,14 +63,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -76,21 +82,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) 
_status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -102,8 +113,9 @@ def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( @@ -113,8 +125,7 @@ def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( @@ -124,8 +135,10 @@ def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( self, @@ -145,7 +158,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_fine_tune_id,): + for parameter in ( + request_path_fine_tune_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -153,17 +168,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -180,7 +195,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -196,8 +215,9 @@ def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( @@ -207,8 +227,7 @@ def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( @@ -218,8 +237,10 @@ def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( self, @@ -234,7 +255,7 @@ def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -249,8 +270,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -260,8 +282,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -271,8 +292,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -287,5 +310,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.pyi b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.pyi deleted file mode 100644 index 41b3fc98..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id/get.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_fine_tune_response import GetFineTuneResponse -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -FineTuneIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "fine_tune_id": typing.Union[ - FineTuneIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - 
pass - -request_path_fine_tune_id = api_client.PathParameter( - name="fine_tune_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FineTuneIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetFineTuneResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Fine Tune - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_fine_tune_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in 
accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetFineTuneV1LlmFineTunesFineTuneIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_v1_llm_fine_tunes_fine_tune_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py index 4b3e79e1..216fd5a2 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL +path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_CANCEL \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py index b19a37fa..f8973227 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.py @@ -32,15 +32,17 @@ # Path params FineTuneIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 
'fine_tune_id': typing.Union[FineTuneIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "fine_tune_id": typing.Union[ - FineTuneIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CancelFineTuneResponse @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) 
_status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( @@ -115,8 +127,7 @@ def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( @@ -126,8 +137,10 @@ def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_fine_tune_id,): + for parameter in ( + request_path_fine_tune_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='put'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( @@ -209,8 +229,7 @@ def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( @@ -220,8 +239,10 @@ def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( self, @@ -236,7 +257,7 @@ def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def put( @@ -262,8 +284,7 @@ def put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def put( @@ -273,8 +294,10 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def put( self, @@ -289,5 +312,7 @@ def put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py index 633c6d09..efa06c77 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS +path = PathValues.V1_LLM_FINETUNES_FINE_TUNE_ID_EVENTS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py index 6daabc63..e116c368 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.py @@ -32,15 +32,17 @@ # Path params FineTuneIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'fine_tune_id': typing.Union[FineTuneIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "fine_tune_id": typing.Union[ - FineTuneIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, 
total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetFineTuneEventsResponse @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def 
_get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( @@ -115,8 +127,7 @@ def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( @@ -126,8 +137,10 @@ def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_fine_tune_id,): + for parameter in ( + request_path_fine_tune_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( @@ -209,8 +229,7 @@ def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( @@ -220,8 +239,10 @@ def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( self, @@ -236,7 +257,7 @@ def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.pyi b/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.pyi deleted file mode 100644 index a7a8b126..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_events/get.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -FineTuneIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "fine_tune_id": typing.Union[ - FineTuneIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class 
RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_fine_tune_id = api_client.PathParameter( - name="fine_tune_id", - style=api_client.ParameterStyle.SIMPLE, - schema=FineTuneIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetFineTuneEventsResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Fine Tune Events - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_fine_tune_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if 
accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetFineTuneEventsV1LlmFineTunesFineTuneIdEventsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_fine_tune_id_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py b/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py index 00512559..a4792598 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py +++ b/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.py @@ -62,7 +62,9 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset @@ -78,7 +80,9 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset diff --git 
a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.pyi b/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.pyi deleted file mode 100644 index b964e720..00000000 --- a/launch/api_client/paths/v1_llm_fine_tunes_model_endpoint_name_events/get.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_fine_tune_events_response import ( - GetFineTuneEventsResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetFineTuneEventsResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = 
api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Fine Tune Events - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_endpoint_name,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - 
response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetFineTuneEventsV1LlmFineTunesModelEndpointNameEventsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_fine_tune_events_v1_llm_fine_tunes_model_endpoint_name_events_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_model_endpoints/__init__.py b/launch/api_client/paths/v1_llm_model_endpoints/__init__.py index e36dbbac..c84b3640 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints/__init__.py +++ b/launch/api_client/paths/v1_llm_model_endpoints/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_MODELENDPOINTS +path = PathValues.V1_LLM_MODELENDPOINTS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_model_endpoints/get.py b/launch/api_client/paths/v1_llm_model_endpoints/get.py index 472b9f6d..cf89e8bd 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints/get.py +++ b/launch/api_client/paths/v1_llm_model_endpoints/get.py @@ -33,19 +33,39 @@ from . 
import path # Query params -NameSchema = schemas.StrSchema + + +class NameSchema( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'NameSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) OrderBySchema = ModelEndpointOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) +RequestRequiredQueryParams = typing_extensions.TypedDict( + 'RequestRequiredQueryParams', + { + } +) RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", + 'RequestOptionalQueryParams', { - "name": typing.Union[ - NameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], + 'name': typing.Union[NameSchema, None, str, ], + 'order_by': typing.Union[OrderBySchema, ], }, - total=False, + total=False ) @@ -66,7 +86,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListLLMModelEndpointsV1Response @@ -74,14 +95,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -90,21 +114,26 @@ class 
ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -116,8 +145,9 @@ def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( @@ -127,8 +157,7 @@ def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( @@ -138,8 +167,10 @@ def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( self, @@ -176,11 +207,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -197,7 +228,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -213,8 +248,9 @@ def list_model_endpoints_v1_llm_model_endpoints_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_model_endpoints_v1_llm_model_endpoints_get( @@ -224,8 +260,7 @@ def list_model_endpoints_v1_llm_model_endpoints_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_model_endpoints_v1_llm_model_endpoints_get( @@ -235,8 +270,10 @@ def list_model_endpoints_v1_llm_model_endpoints_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_model_endpoints_v1_llm_model_endpoints_get( self, @@ -251,7 +288,7 @@ def list_model_endpoints_v1_llm_model_endpoints_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -266,8 +303,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -277,8 +315,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -288,8 +325,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -304,5 +343,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_model_endpoints/get.pyi b/launch/api_client/paths/v1_llm_model_endpoints/get.pyi deleted file mode 100644 index 77843ec7..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints/get.pyi +++ /dev/null @@ -1,269 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.list_llm_model_endpoints_v1_response import ( - ListLLMModelEndpointsV1Response, -) -from launch_client.model.model_endpoint_order_by import ModelEndpointOrderBy -from urllib3._collections import HTTPHeaderDict - -# Query params -NameSchema = schemas.StrSchema -OrderBySchema = ModelEndpointOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) -RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", - { - "name": typing.Union[ - NameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], - }, - total=False, -) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_name = api_client.QueryParameter( - name="name", - style=api_client.ParameterStyle.FORM, - schema=NameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - 
name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ListLLMModelEndpointsV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_model_endpoints_v1_llm_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Endpoints - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - 
api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListModelEndpointsV1LlmModelEndpointsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_model_endpoints_v1_llm_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_model_endpoints_v1_llm_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_llm_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_llm_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_model_endpoints/post.py b/launch/api_client/paths/v1_llm_model_endpoints/post.py index 63f2239a..2d1464b1 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints/post.py +++ b/launch/api_client/paths/v1_llm_model_endpoints/post.py @@ -38,12 +38,14 @@ request_body_create_llm_model_endpoint_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateLLMModelEndpointV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): 
response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( @@ -106,8 +117,10 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( @@ -118,8 +131,7 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( @@ -130,13 +142,15 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_llm_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_model_endpoint_v1_llm_model_endpoints_post( @@ -218,8 +236,10 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_model_endpoint_v1_llm_model_endpoints_post( @@ -230,8 +250,7 @@ def create_model_endpoint_v1_llm_model_endpoints_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_model_endpoint_v1_llm_model_endpoints_post( @@ -242,13 +261,15 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_model_endpoint_v1_llm_model_endpoints_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_model_endpoint_v1_llm_model_endpoints_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py b/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py index b1e3ff3f..a6b0f34f 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py +++ b/launch/api_client/paths/v1_llm_model_endpoints_download/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD +path = PathValues.V1_LLM_MODELENDPOINTS_DOWNLOAD \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_model_endpoints_download/post.py b/launch/api_client/paths/v1_llm_model_endpoints_download/post.py index 29beab8e..981af61b 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_download/post.py +++ b/launch/api_client/paths/v1_llm_model_endpoints_download/post.py @@ -36,12 +36,14 @@ request_body_model_download_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ModelDownloadResponse @@ -49,14 +51,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + 
SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -65,21 +70,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -92,8 +102,9 @@ def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( @@ -104,8 +115,10 @@ def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( @@ -116,8 +129,7 @@ def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( @@ -128,13 +140,15 @@ def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -152,23 +166,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_model_download_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -187,7 +200,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -204,8 +221,9 @@ def download_model_endpoint_v1_llm_model_endpoints_download_post( stream: bool = False, 
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def download_model_endpoint_v1_llm_model_endpoints_download_post( @@ -216,8 +234,10 @@ def download_model_endpoint_v1_llm_model_endpoints_download_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def download_model_endpoint_v1_llm_model_endpoints_download_post( @@ -228,8 +248,7 @@ def download_model_endpoint_v1_llm_model_endpoints_download_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def download_model_endpoint_v1_llm_model_endpoints_download_post( @@ -240,13 +259,15 @@ def download_model_endpoint_v1_llm_model_endpoints_download_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def download_model_endpoint_v1_llm_model_endpoints_download_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -258,7 +279,7 @@ def download_model_endpoint_v1_llm_model_endpoints_download_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -274,8 +295,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -286,8 +308,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -298,8 +322,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -310,13 +333,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -328,5 +353,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_model_endpoints_download/post.pyi b/launch/api_client/paths/v1_llm_model_endpoints_download/post.pyi deleted file mode 100644 index 4023dafb..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_download/post.pyi +++ /dev/null @@ -1,288 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.model_download_request import ModelDownloadRequest -from launch_client.model.model_download_response import ModelDownloadResponse -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = ModelDownloadRequest - -request_body_model_download_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = ModelDownloadResponse - -@dataclass -class 
ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Download Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_model_download_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class DownloadModelEndpointV1LlmModelEndpointsDownloadPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def download_model_endpoint_v1_llm_model_endpoints_download_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._download_model_endpoint_v1_llm_model_endpoints_download_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py index ec37b741..c5a41d53 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py +++ b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME +path = PathValues.V1_LLM_MODELENDPOINTS_MODEL_ENDPOINT_NAME \ No newline at end of file diff --git 
a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py index 88db0473..2638eb9f 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py +++ b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.py @@ -32,15 +32,17 @@ # Path params ModelEndpointNameSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = DeleteLLMEndpointResponse @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class 
ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( @@ -115,8 +127,7 @@ def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( @@ -126,8 +137,10 @@ def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_name,): + for parameter in ( + request_path_model_endpoint_name, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="delete".upper(), + method='delete'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def 
delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( @@ -209,8 +229,7 @@ def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( @@ -220,8 +239,10 @@ def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( self, @@ -236,7 +257,7 @@ def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def delete( @@ -262,8 +284,7 @@ def delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def delete( @@ -273,8 +294,10 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete( self, @@ -289,5 +312,7 @@ def delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.pyi b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.pyi deleted file mode 100644 index 60449d92..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/delete.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.delete_llm_endpoint_response import ( - DeleteLLMEndpointResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import 
HTTPHeaderDict - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = DeleteLLMEndpointResponse - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - 
skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete Llm Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_endpoint_name,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="delete".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class 
DeleteLlmModelEndpointV1LlmModelEndpointsModelEndpointNameDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_llm_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py index a80fc0c1..690338c6 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py +++ b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.py @@ -32,15 +32,17 @@ # Path params ModelEndpointNameSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetLLMModelEndpointV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( @@ -115,8 +127,7 @@ def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( @@ -126,8 +137,10 @@ def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_name,): + for parameter in ( + request_path_model_endpoint_name, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( @@ -209,8 +229,7 @@ def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( @@ -220,8 +239,10 @@ def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( self, @@ -236,7 +257,7 @@ def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.pyi b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.pyi deleted file mode 100644 index 3ae9392f..00000000 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/get.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_llm_model_endpoint_v1_response import ( - GetLLMModelEndpointV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -ModelEndpointNameSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetLLMModelEndpointV1Response - -@dataclass 
-class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_endpoint_name,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = 
_status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetModelEndpointV1LlmModelEndpointsModelEndpointNameGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py index 78390380..f7ca8bf2 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py +++ b/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.py @@ -35,15 +35,17 @@ # Path params ModelEndpointNameSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_name': typing.Union[ModelEndpointNameSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -62,12 +64,14 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): request_body_update_llm_model_endpoint_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] 
SchemaFor200ResponseBodyApplicationJson = UpdateLLMModelEndpointV1Response @@ -75,14 +79,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -91,21 +98,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -119,8 +131,9 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( @@ -132,8 +145,10 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( @@ -145,8 +160,7 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( @@ -158,13 +172,15 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -181,7 +197,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_name,): + for parameter in ( + request_path_model_endpoint_name, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -189,29 +207,28 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_update_llm_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='put'.upper(), headers=_headers, fields=_fields, body=_body, @@ -230,7 +247,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -248,8 +269,9 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( @@ -261,8 +283,10 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( @@ -274,8 +298,7 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( @@ -287,13 +310,15 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -307,7 +332,7 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -324,8 +349,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def put( @@ -337,8 +363,10 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def put( @@ -350,8 +378,7 @@ def put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def put( @@ -363,13 +390,15 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -383,5 +412,7 @@ def put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_bundles/__init__.py b/launch/api_client/paths/v1_model_bundles/__init__.py index 46bf5ae2..02168487 100644 --- a/launch/api_client/paths/v1_model_bundles/__init__.py +++ b/launch/api_client/paths/v1_model_bundles/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELBUNDLES +path = PathValues.V1_MODELBUNDLES \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles/get.py b/launch/api_client/paths/v1_model_bundles/get.py index 4ed7d60d..a9fd9681 100644 --- a/launch/api_client/paths/v1_model_bundles/get.py +++ b/launch/api_client/paths/v1_model_bundles/get.py @@ -31,19 +31,39 @@ from . 
import path # Query params -ModelNameSchema = schemas.StrSchema + + +class ModelNameSchema( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ModelNameSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) +RequestRequiredQueryParams = typing_extensions.TypedDict( + 'RequestRequiredQueryParams', + { + } +) RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", + 'RequestOptionalQueryParams', { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], + 'model_name': typing.Union[ModelNameSchema, None, str, ], + 'order_by': typing.Union[OrderBySchema, ], }, - total=False, + total=False ) @@ -64,7 +84,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListModelBundlesV1Response @@ -72,14 +93,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -88,21 +112,26 @@ class 
ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -114,8 +143,9 @@ def _list_model_bundles_v1_model_bundles_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_model_bundles_v1_model_bundles_get_oapg( @@ -125,8 +155,7 @@ def _list_model_bundles_v1_model_bundles_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_model_bundles_v1_model_bundles_get_oapg( @@ -136,8 +165,10 @@ def _list_model_bundles_v1_model_bundles_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_model_bundles_v1_model_bundles_get_oapg( self, @@ -174,11 +205,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -195,7 +226,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -211,8 +246,9 @@ def list_model_bundles_v1_model_bundles_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_model_bundles_v1_model_bundles_get( @@ -222,8 +258,7 @@ def list_model_bundles_v1_model_bundles_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_model_bundles_v1_model_bundles_get( @@ -233,8 +268,10 @@ def list_model_bundles_v1_model_bundles_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_model_bundles_v1_model_bundles_get( self, @@ -249,7 +286,7 @@ def list_model_bundles_v1_model_bundles_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -264,8 +301,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -275,8 +313,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -286,8 +323,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -302,5 +341,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_bundles/get.pyi b/launch/api_client/paths/v1_model_bundles/get.pyi deleted file mode 100644 index 2d2cf510..00000000 --- a/launch/api_client/paths/v1_model_bundles/get.pyi +++ /dev/null @@ -1,269 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.list_model_bundles_v1_response import ( - ListModelBundlesV1Response, -) -from launch_client.model.model_bundle_order_by import ModelBundleOrderBy -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelNameSchema = schemas.StrSchema -OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) -RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", - { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], - }, - total=False, -) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - 
name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ListModelBundlesV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_model_bundles_v1_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Bundles - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListModelBundlesV1ModelBundlesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_bundles_v1_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_model_bundles_v1_model_bundles_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_model_bundles_v1_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_model_bundles_v1_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v1_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v1_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_bundles/post.py b/launch/api_client/paths/v1_model_bundles/post.py index c850d079..6c9e4941 100644 --- a/launch/api_client/paths/v1_model_bundles/post.py +++ b/launch/api_client/paths/v1_model_bundles/post.py @@ -38,12 +38,14 @@ request_body_create_model_bundle_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_model_bundle_v1_model_bundles_post_oapg( @@ -106,8 +117,10 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_model_bundle_v1_model_bundles_post_oapg( @@ -118,8 +131,7 @@ def _create_model_bundle_v1_model_bundles_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_model_bundle_v1_model_bundles_post_oapg( @@ -130,13 +142,15 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_model_bundle_v1_model_bundles_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_model_bundle_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_model_bundle_v1_model_bundles_post( @@ -218,8 +236,10 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_model_bundle_v1_model_bundles_post( @@ -230,8 +250,7 @@ def create_model_bundle_v1_model_bundles_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_model_bundle_v1_model_bundles_post( @@ -242,13 +261,15 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_model_bundle_v1_model_bundles_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_model_bundle_v1_model_bundles_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py b/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py index 35511bd5..b5c960ff 100644 --- a/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py +++ b/launch/api_client/paths/v1_model_bundles_clone_with_changes/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES +path = PathValues.V1_MODELBUNDLES_CLONEWITHCHANGES \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py b/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py index d6665999..5a2dec3e 100644 --- 
a/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py +++ b/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.py @@ -38,12 +38,14 @@ request_body_clone_model_bundle_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = 
("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oa stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( @@ -106,8 +117,10 @@ def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oa stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( @@ -118,8 +131,7 @@ def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oa accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( @@ -130,13 +142,15 @@ def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oa stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_clone_model_bundle_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def 
clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( @@ -218,8 +236,10 @@ def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( @@ -230,8 +250,7 @@ def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( @@ -242,13 +261,15 @@ def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.pyi b/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.pyi deleted file mode 100644 index 550e5780..00000000 --- a/launch/api_client/paths/v1_model_bundles_clone_with_changes/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.clone_model_bundle_v1_request import ( - CloneModelBundleV1Request, -) -from launch_client.model.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CloneModelBundleV1Request - -request_body_clone_model_bundle_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = 
CreateModelBundleV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Clone Model Bundle With Changes - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_clone_model_bundle_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CloneModelBundleWithChangesV1ModelBundlesCloneWithChangesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_bundles_latest/__init__.py b/launch/api_client/paths/v1_model_bundles_latest/__init__.py index b5e54b32..e8834ca7 100644 --- a/launch/api_client/paths/v1_model_bundles_latest/__init__.py +++ b/launch/api_client/paths/v1_model_bundles_latest/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELBUNDLES_LATEST +path = PathValues.V1_MODELBUNDLES_LATEST \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles_latest/get.py 
b/launch/api_client/paths/v1_model_bundles_latest/get.py index e4397a5a..72da3ddf 100644 --- a/launch/api_client/paths/v1_model_bundles_latest/get.py +++ b/launch/api_client/paths/v1_model_bundles_latest/get.py @@ -32,15 +32,17 @@ # Query params ModelNameSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'model_name': typing.Union[ModelNameSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -55,7 +57,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ModelBundleV1Response @@ -63,14 +66,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -79,21 +85,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -105,8 +116,9 @@ def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( @@ -116,8 +128,7 @@ def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( @@ -127,8 +138,10 @@ def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( self, @@ -148,7 +161,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_name,): + for parameter in ( + request_query_model_name, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -162,11 +177,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -183,7 +198,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -199,8 +218,9 @@ def get_latest_model_bundle_v1_model_bundles_latest_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_latest_model_bundle_v1_model_bundles_latest_get( @@ -210,8 +230,7 @@ def get_latest_model_bundle_v1_model_bundles_latest_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get_latest_model_bundle_v1_model_bundles_latest_get( @@ -221,8 +240,10 @@ def get_latest_model_bundle_v1_model_bundles_latest_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_latest_model_bundle_v1_model_bundles_latest_get( self, @@ -237,7 +258,7 @@ def get_latest_model_bundle_v1_model_bundles_latest_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -252,8 +273,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -263,8 +285,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -274,8 +295,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -290,5 +313,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_bundles_latest/get.pyi b/launch/api_client/paths/v1_model_bundles_latest/get.pyi deleted file mode 100644 index bacb33b7..00000000 --- a/launch/api_client/paths/v1_model_bundles_latest/get.pyi +++ /dev/null @@ -1,255 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.model_bundle_v1_response import ModelBundleV1Response -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - required=True, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ModelBundleV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Latest Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetLatestModelBundleV1ModelBundlesLatestGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get_latest_model_bundle_v1_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v1_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py b/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py index d3532d3a..aeafa82e 100644 --- a/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py +++ b/launch/api_client/paths/v1_model_bundles_model_bundle_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID +path = PathValues.V1_MODELBUNDLES_MODEL_BUNDLE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py b/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py index f79165c8..551e1342 100644 --- a/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py +++ b/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.py @@ -32,15 +32,17 @@ # Path params ModelBundleIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_bundle_id': typing.Union[ModelBundleIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_bundle_id": typing.Union[ - ModelBundleIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 
+56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ModelBundleV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, 
typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( @@ -115,8 +127,7 @@ def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( @@ -126,8 +137,10 @@ def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_bundle_id,): + for parameter in ( + request_path_model_bundle_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_model_bundle_v1_model_bundles_model_bundle_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_model_bundle_v1_model_bundles_model_bundle_id_get( @@ -209,8 +229,7 @@ def get_model_bundle_v1_model_bundles_model_bundle_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_model_bundle_v1_model_bundles_model_bundle_id_get( @@ -220,8 +239,10 @@ def get_model_bundle_v1_model_bundles_model_bundle_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_model_bundle_v1_model_bundles_model_bundle_id_get( self, @@ -236,7 +257,7 @@ def get_model_bundle_v1_model_bundles_model_bundle_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.pyi b/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.pyi deleted file mode 100644 index 4af810ea..00000000 --- a/launch/api_client/paths/v1_model_bundles_model_bundle_id/get.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.model_bundle_v1_response import ModelBundleV1Response -from urllib3._collections import HTTPHeaderDict - -# Path params -ModelBundleIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_bundle_id": typing.Union[ - ModelBundleIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, 
RequestOptionalPathParams): - pass - -request_path_model_bundle_id = api_client.PathParameter( - name="model_bundle_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelBundleIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = ModelBundleV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_bundle_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for 
accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetModelBundleV1ModelBundlesModelBundleIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_model_bundle_v1_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v1_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_endpoints/__init__.py b/launch/api_client/paths/v1_model_endpoints/__init__.py index f58f6045..63a2873d 100644 --- a/launch/api_client/paths/v1_model_endpoints/__init__.py +++ b/launch/api_client/paths/v1_model_endpoints/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELENDPOINTS +path = PathValues.V1_MODELENDPOINTS \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints/get.py b/launch/api_client/paths/v1_model_endpoints/get.py index e572a293..6b93f989 100644 --- a/launch/api_client/paths/v1_model_endpoints/get.py +++ b/launch/api_client/paths/v1_model_endpoints/get.py @@ -33,19 +33,39 @@ from . 
import path # Query params -NameSchema = schemas.StrSchema + + +class NameSchema( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'NameSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) OrderBySchema = ModelEndpointOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) +RequestRequiredQueryParams = typing_extensions.TypedDict( + 'RequestRequiredQueryParams', + { + } +) RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", + 'RequestOptionalQueryParams', { - "name": typing.Union[ - NameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], + 'name': typing.Union[NameSchema, None, str, ], + 'order_by': typing.Union[OrderBySchema, ], }, - total=False, + total=False ) @@ -66,7 +86,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListModelEndpointsV1Response @@ -74,14 +95,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -90,21 +114,26 @@ class 
ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -116,8 +145,9 @@ def _list_model_endpoints_v1_model_endpoints_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_model_endpoints_v1_model_endpoints_get_oapg( @@ -127,8 +157,7 @@ def _list_model_endpoints_v1_model_endpoints_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_model_endpoints_v1_model_endpoints_get_oapg( @@ -138,8 +167,10 @@ def _list_model_endpoints_v1_model_endpoints_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_model_endpoints_v1_model_endpoints_get_oapg( self, @@ -176,11 +207,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -197,7 +228,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -213,8 +248,9 @@ def list_model_endpoints_v1_model_endpoints_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_model_endpoints_v1_model_endpoints_get( @@ -224,8 +260,7 @@ def list_model_endpoints_v1_model_endpoints_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_model_endpoints_v1_model_endpoints_get( @@ -235,8 +270,10 @@ def list_model_endpoints_v1_model_endpoints_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_model_endpoints_v1_model_endpoints_get( self, @@ -251,7 +288,7 @@ def list_model_endpoints_v1_model_endpoints_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -266,8 +303,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -277,8 +315,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -288,8 +325,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -304,5 +343,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints/get.pyi b/launch/api_client/paths/v1_model_endpoints/get.pyi deleted file mode 100644 index fea7a870..00000000 --- a/launch/api_client/paths/v1_model_endpoints/get.pyi +++ /dev/null @@ -1,269 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.list_model_endpoints_v1_response import ( - ListModelEndpointsV1Response, -) -from launch_client.model.model_endpoint_order_by import ModelEndpointOrderBy -from urllib3._collections import HTTPHeaderDict - -# Query params -NameSchema = schemas.StrSchema -OrderBySchema = ModelEndpointOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) -RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", - { - "name": typing.Union[ - NameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], - }, - total=False, -) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_name = api_client.QueryParameter( - name="name", - style=api_client.ParameterStyle.FORM, - schema=NameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - name="order_by", - 
style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ListModelEndpointsV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_model_endpoints_v1_model_endpoints_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Endpoints - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListModelEndpointsV1ModelEndpointsGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_endpoints_v1_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_model_endpoints_v1_model_endpoints_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_model_endpoints_v1_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_model_endpoints_v1_model_endpoints_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_endpoints_v1_model_endpoints_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_endpoints/post.py b/launch/api_client/paths/v1_model_endpoints/post.py index 99b4a96e..65ce528a 100644 --- a/launch/api_client/paths/v1_model_endpoints/post.py +++ b/launch/api_client/paths/v1_model_endpoints/post.py @@ -38,12 +38,14 @@ request_body_create_model_endpoint_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateModelEndpointV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: 
urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_model_endpoint_v1_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_model_endpoint_v1_model_endpoints_post_oapg( @@ -106,8 +117,10 @@ def _create_model_endpoint_v1_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_model_endpoint_v1_model_endpoints_post_oapg( @@ -118,8 +131,7 @@ def _create_model_endpoint_v1_model_endpoints_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_model_endpoint_v1_model_endpoints_post_oapg( @@ -130,13 +142,15 @@ def _create_model_endpoint_v1_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_model_endpoint_v1_model_endpoints_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_model_endpoint_v1_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_model_endpoint_v1_model_endpoints_post( @@ -218,8 +236,10 @@ def create_model_endpoint_v1_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_model_endpoint_v1_model_endpoints_post( @@ -230,8 +250,7 @@ def create_model_endpoint_v1_model_endpoints_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_model_endpoint_v1_model_endpoints_post( @@ -242,13 +261,15 @@ def create_model_endpoint_v1_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_model_endpoint_v1_model_endpoints_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_model_endpoint_v1_model_endpoints_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints/post.pyi b/launch/api_client/paths/v1_model_endpoints/post.pyi deleted file mode 100644 index 2af825d6..00000000 --- a/launch/api_client/paths/v1_model_endpoints/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 
-from launch_client import api_client, exceptions -from launch_client.model.create_model_endpoint_v1_request import ( - CreateModelEndpointV1Request, -) -from launch_client.model.create_model_endpoint_v1_response import ( - CreateModelEndpointV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateModelEndpointV1Request - -request_body_create_model_endpoint_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateModelEndpointV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - 
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_model_endpoint_v1_model_endpoints_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, 
api_response=api_response) - - return api_response - -class CreateModelEndpointV1ModelEndpointsPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def create_model_endpoint_v1_model_endpoints_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_endpoint_v1_model_endpoints_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_endpoint_v1_model_endpoints_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_endpoints_api/__init__.py b/launch/api_client/paths/v1_model_endpoints_api/__init__.py index c4f1910d..69585e08 100644 --- a/launch/api_client/paths/v1_model_endpoints_api/__init__.py +++ b/launch/api_client/paths/v1_model_endpoints_api/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELENDPOINTSAPI +path = PathValues.V1_MODELENDPOINTSAPI \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_api/get.py b/launch/api_client/paths/v1_model_endpoints_api/get.py index d46a8f06..3b082c58 100644 --- a/launch/api_client/paths/v1_model_endpoints_api/get.py +++ b/launch/api_client/paths/v1_model_endpoints_api/get.py @@ -26,7 +26,8 @@ from . 
import path _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema @@ -34,20 +35,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -58,8 +64,9 @@ def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( @@ -68,8 +75,7 @@ def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( @@ -78,8 +84,10 @@ def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( self, @@ -100,11 +108,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -121,7 +129,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -136,8 +148,9 @@ def get_model_endpoints_api_v1_model_endpoints_api_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_model_endpoints_api_v1_model_endpoints_api_get( @@ -146,8 +159,7 @@ def get_model_endpoints_api_v1_model_endpoints_api_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_model_endpoints_api_v1_model_endpoints_api_get( @@ -156,8 +168,10 @@ def get_model_endpoints_api_v1_model_endpoints_api_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_model_endpoints_api_v1_model_endpoints_api_get( self, @@ -170,7 +184,7 @@ def get_model_endpoints_api_v1_model_endpoints_api_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -184,8 +198,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -194,8 +209,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -204,8 +218,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -218,5 +234,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints_api/get.pyi b/launch/api_client/paths/v1_model_endpoints_api/get.pyi deleted file mode 100644 index a95a6348..00000000 --- a/launch/api_client/paths/v1_model_endpoints_api/get.pyi +++ /dev/null @@ -1,190 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def 
_get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoints Api - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetModelEndpointsApiV1ModelEndpointsApiGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_model_endpoints_api_v1_model_endpoints_api_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_api_v1_model_endpoints_api_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py index fb6cdf7c..e382b4ff 100644 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py +++ b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID +path = PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py index 3add1108..eb4e8a7e 100644 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py +++ b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.py @@ -32,15 +32,17 @@ # Path params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 
'RequestOptionalPathParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = DeleteModelEndpointV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': 
_response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( @@ -115,8 +127,7 @@ def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( @@ -126,8 +137,10 @@ def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_id,): + for parameter in ( + request_path_model_endpoint_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="delete".upper(), + method='delete'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( @@ -209,8 +229,7 @@ def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( @@ -220,8 +239,10 @@ def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( self, @@ -236,7 +257,7 @@ def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def delete( @@ -262,8 +284,7 @@ def delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def delete( @@ -273,8 +294,10 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete( self, @@ -289,5 +312,7 @@ def delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.pyi b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.pyi deleted file mode 100644 index 07bbc190..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/delete.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.delete_model_endpoint_v1_response import ( - DeleteModelEndpointV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - 
-class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_model_endpoint_id = api_client.PathParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = DeleteModelEndpointV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_endpoint_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - 
if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="delete".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class DeleteModelEndpointV1ModelEndpointsModelEndpointIdDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_model_endpoint_v1_model_endpoints_model_endpoint_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py index daa30f01..382f63f2 100644 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py +++ b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.py @@ -32,15 +32,17 @@ # Path params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): 
required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = GetModelEndpointV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) 
-> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( @@ -115,8 +127,7 @@ def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( @@ -126,8 +137,10 @@ def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_id,): + for parameter in ( + request_path_model_endpoint_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( @@ -209,8 +229,7 @@ def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( @@ -220,8 +239,10 @@ def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( self, @@ -236,7 +257,7 @@ def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py index 6c1e4cf4..c5f4f94b 100644 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py +++ b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.py @@ -35,15 +35,17 @@ # Path params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -62,12 +64,14 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): request_body_update_model_endpoint_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', 
] SchemaFor200ResponseBodyApplicationJson = UpdateModelEndpointV1Response @@ -75,14 +79,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -91,21 +98,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -119,8 +131,9 @@ def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( @@ -132,8 +145,10 @@ def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( @@ -145,8 +160,7 @@ def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( @@ -158,13 +172,15 @@ def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -181,7 +197,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_id,): + for parameter in ( + request_path_model_endpoint_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -189,29 +207,28 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_update_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='put'.upper(), headers=_headers, fields=_fields, body=_body, @@ -230,7 +247,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -248,8 +269,9 @@ def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( @@ -261,8 +283,10 @@ def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( @@ -274,8 +298,7 @@ def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( @@ -287,13 +310,15 @@ def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -307,7 +332,7 @@ def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -324,8 +349,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def put( @@ -337,8 +363,10 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def put( @@ -350,8 +378,7 @@ def put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def put( @@ -363,13 +390,15 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -383,5 +412,7 @@ def put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.pyi b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.pyi deleted file mode 100644 index 60495b21..00000000 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/put.pyi +++ /dev/null @@ -1,343 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.update_model_endpoint_v1_request import ( - UpdateModelEndpointV1Request, -) -from launch_client.model.update_model_endpoint_v1_response import ( - UpdateModelEndpointV1Response, -) -from urllib3._collections import HTTPHeaderDict - -# Path params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, 
total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_model_endpoint_id = api_client.PathParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateModelEndpointV1Request - -request_body_update_model_endpoint_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = UpdateModelEndpointV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: 
typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Model Endpoint - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_endpoint_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_update_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="put".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class UpdateModelEndpointV1ModelEndpointsModelEndpointIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def update_model_endpoint_v1_model_endpoints_model_endpoint_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_model_endpoint_v1_model_endpoints_model_endpoint_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/__init__.py b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/__init__.py new file mode 100644 index 00000000..e9519dcb --- /dev/null +++ b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/__init__.py @@ -0,0 +1,7 @@ +# do not import all endpoints into this module 
because that uses a lot of memory and stack frames +# if you need the ability to import all endpoints from this module, import them with +# from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart import Api + +from launch.api_client.paths import PathValues + +path = PathValues.V1_MODELENDPOINTS_MODEL_ENDPOINT_ID_RESTART \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.pyi b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/post.py similarity index 69% rename from launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.pyi rename to launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/post.py index 1c74f794..21362c71 100644 --- a/launch/api_client/paths/v1_model_endpoints_model_endpoint_id/get.pyi +++ b/launch/api_client/paths/v1_model_endpoints_model_endpoint_id_restart/post.py @@ -18,78 +18,109 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_model_endpoint_v1_response import ( - GetModelEndpointV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError from urllib3._collections import HTTPHeaderDict +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.http_validation_error import HTTPValidationError +from launch.api_client.model.restart_model_endpoint_v1_response import ( + RestartModelEndpointV1Response, +) + +from . 
import path + # Path params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) + class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): pass + request_path_model_endpoint_id = api_client.PathParameter( name="model_endpoint_id", style=api_client.ParameterStyle.SIMPLE, schema=ModelEndpointIdSchema, required=True, ) -SchemaFor200ResponseBodyApplicationJson = GetModelEndpointV1Response +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] +SchemaFor200ResponseBodyApplicationJson = RestartModelEndpointV1Response + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - 
"application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': _response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( + def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( + def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -97,16 +128,21 @@ def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
+ @typing.overload - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( + def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def _restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -115,7 +151,7 @@ def _get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( skip_deserialization: bool = False, ): """ - Get Model Endpoint + Restart Model Endpoint :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -124,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_id,): + for parameter in ( + request_path_model_endpoint_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -132,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) 
response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='post'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -159,24 +197,32 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class GetModelEndpointV1ModelEndpointsModelEndpointIdGet(BaseApi): + +class RestartModelEndpointV1ModelEndpointsModelEndpointIdRestartPost(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( + def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( + def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -184,16 +230,21 @@ def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
+ @typing.overload - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( + def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -201,28 +252,32 @@ def get_model_endpoint_v1_model_endpoints_model_endpoint_id_get( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( + return self._restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( path_params=path_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) -class ApiForget(BaseApi): + +class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload - def get( + def post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def get( + def post( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -230,16 +285,21 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def get( + def post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -247,10 +307,12 @@ def get( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._get_model_endpoint_v1_model_endpoints_model_endpoint_id_get_oapg( + return self._restart_model_endpoint_v1_model_endpoints_model_endpoint_id_restart_post_oapg( path_params=path_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py b/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py index 4dd4f68d..bbefa10f 100644 --- a/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py +++ b/launch/api_client/paths/v1_model_endpoints_schema_json/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_MODELENDPOINTSSCHEMA_JSON +path = 
PathValues.V1_MODELENDPOINTSSCHEMA_JSON \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_endpoints_schema_json/get.py b/launch/api_client/paths/v1_model_endpoints_schema_json/get.py index 2cba49a7..a12646fa 100644 --- a/launch/api_client/paths/v1_model_endpoints_schema_json/get.py +++ b/launch/api_client/paths/v1_model_endpoints_schema_json/get.py @@ -26,7 +26,8 @@ from . import path _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema @@ -34,20 +35,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -58,8 +64,9 @@ def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( @@ -68,8 +75,7 @@ def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( @@ -78,8 +84,10 @@ def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( self, @@ -100,11 +108,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -121,7 +129,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -136,8 +148,9 @@ def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, 
skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( @@ -146,8 +159,7 @@ def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( @@ -156,8 +168,10 @@ def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( self, @@ -170,7 +184,7 @@ def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -184,8 +198,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -194,8 +209,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... 
+ ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -204,8 +218,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -218,5 +234,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_model_endpoints_schema_json/get.pyi b/launch/api_client/paths/v1_model_endpoints_schema_json/get.pyi deleted file mode 100644 index 25adcc5f..00000000 --- a/launch/api_client/paths/v1_model_endpoints_schema_json/get.pyi +++ /dev/null @@ -1,190 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = 
("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Endpoints Schema - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetModelEndpointsSchemaV1ModelEndpointsSchemaJsonGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_model_endpoints_schema_v1_model_endpoints_schema_json_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_endpoints_schema_v1_model_endpoints_schema_json_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_streaming_tasks/__init__.py b/launch/api_client/paths/v1_streaming_tasks/__init__.py index 97c53796..f5b3efaa 100644 --- a/launch/api_client/paths/v1_streaming_tasks/__init__.py +++ b/launch/api_client/paths/v1_streaming_tasks/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_STREAMINGTASKS +path = PathValues.V1_STREAMINGTASKS \ No newline at end of file diff --git a/launch/api_client/paths/v1_streaming_tasks/post.py b/launch/api_client/paths/v1_streaming_tasks/post.py index 346acc43..366fd7d0 100644 --- a/launch/api_client/paths/v1_streaming_tasks/post.py +++ b/launch/api_client/paths/v1_streaming_tasks/post.py @@ -32,15 +32,17 @@ # Query params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 
'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -60,12 +62,14 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) request_body_sync_endpoint_predict_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema @@ -73,14 +77,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -89,21 +96,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset 
_response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -117,8 +129,9 @@ def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( @@ -130,8 +143,10 @@ def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( @@ -143,8 +158,7 @@ def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( @@ -156,13 +170,15 @@ def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -179,7 +195,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_id,): + for parameter in ( + request_query_model_endpoint_id, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -189,27 +207,26 @@ class instances for serialized_value in serialized_data.values(): used_path += serialized_value - _headers = HTTPHeaderDict(self.api_client.default_headers) + _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_sync_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -228,7 +245,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -246,8 +267,9 @@ def create_streaming_inference_task_v1_streaming_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_streaming_inference_task_v1_streaming_tasks_post( @@ -259,8 +281,10 @@ def create_streaming_inference_task_v1_streaming_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_streaming_inference_task_v1_streaming_tasks_post( @@ -272,8 +296,7 @@ def create_streaming_inference_task_v1_streaming_tasks_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_streaming_inference_task_v1_streaming_tasks_post( @@ -285,13 +308,15 @@ def create_streaming_inference_task_v1_streaming_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_streaming_inference_task_v1_streaming_tasks_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -305,7 +330,7 @@ def create_streaming_inference_task_v1_streaming_tasks_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -322,8 +347,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def post( @@ -335,8 +361,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -348,8 +376,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -361,13 +388,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -381,5 +410,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_streaming_tasks/post.pyi b/launch/api_client/paths/v1_streaming_tasks/post.pyi deleted file mode 100644 index d0a6cfed..00000000 --- a/launch/api_client/paths/v1_streaming_tasks/post.pyi +++ /dev/null @@ -1,341 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_endpoint_id = 
api_client.QueryParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointIdSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = SyncEndpointPredictV1Request - -request_body_sync_endpoint_predict_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = schemas.AnyTypeSchema - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_streaming_inference_task_v1_streaming_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Streaming Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_id,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_sync_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateStreamingInferenceTaskV1StreamingTasksPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_streaming_inference_task_v1_streaming_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_streaming_inference_task_v1_streaming_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_streaming_inference_task_v1_streaming_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_sync_tasks/__init__.py b/launch/api_client/paths/v1_sync_tasks/__init__.py index e05b06c3..0ad403fe 100644 --- a/launch/api_client/paths/v1_sync_tasks/__init__.py +++ b/launch/api_client/paths/v1_sync_tasks/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_SYNCTASKS +path = PathValues.V1_SYNCTASKS \ 
No newline at end of file diff --git a/launch/api_client/paths/v1_sync_tasks/post.py b/launch/api_client/paths/v1_sync_tasks/post.py index dd4cce00..c2cda3fc 100644 --- a/launch/api_client/paths/v1_sync_tasks/post.py +++ b/launch/api_client/paths/v1_sync_tasks/post.py @@ -35,15 +35,17 @@ # Query params ModelEndpointIdSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'model_endpoint_id': typing.Union[ModelEndpointIdSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -63,12 +65,14 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) request_body_sync_endpoint_predict_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = SyncEndpointPredictV1Response @@ -76,14 +80,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 
'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -92,21 +99,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -120,8 +132,9 @@ def _create_sync_inference_task_v1_sync_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_sync_inference_task_v1_sync_tasks_post_oapg( @@ -133,8 +146,10 @@ def _create_sync_inference_task_v1_sync_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def _create_sync_inference_task_v1_sync_tasks_post_oapg( @@ -146,8 +161,7 @@ def _create_sync_inference_task_v1_sync_tasks_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _create_sync_inference_task_v1_sync_tasks_post_oapg( @@ -159,13 +173,15 @@ def _create_sync_inference_task_v1_sync_tasks_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_sync_inference_task_v1_sync_tasks_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -182,7 +198,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_id,): + for parameter in ( + request_query_model_endpoint_id, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -192,27 +210,26 @@ class instances for serialized_value in serialized_data.values(): used_path += serialized_value - _headers = HTTPHeaderDict(self.api_client.default_headers) + _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: 
raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_sync_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -231,7 +248,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -249,8 +270,9 @@ def create_sync_inference_task_v1_sync_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_sync_inference_task_v1_sync_tasks_post( @@ -262,8 +284,10 @@ def create_sync_inference_task_v1_sync_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_sync_inference_task_v1_sync_tasks_post( @@ -275,8 +299,7 @@ def create_sync_inference_task_v1_sync_tasks_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_sync_inference_task_v1_sync_tasks_post( @@ -288,13 +311,15 @@ def create_sync_inference_task_v1_sync_tasks_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_sync_inference_task_v1_sync_tasks_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -308,7 +333,7 @@ def create_sync_inference_task_v1_sync_tasks_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -325,8 +350,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -338,8 +364,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -351,8 +379,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -364,13 +391,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', query_params: RequestQueryParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -384,5 +413,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_sync_tasks/post.pyi b/launch/api_client/paths/v1_sync_tasks/post.pyi deleted file mode 100644 index 172f9066..00000000 --- a/launch/api_client/paths/v1_sync_tasks/post.pyi +++ /dev/null @@ -1,344 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client 
import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.sync_endpoint_predict_v1_request import ( - SyncEndpointPredictV1Request, -) -from launch_client.model.sync_endpoint_predict_v1_response import ( - SyncEndpointPredictV1Response, -) -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelEndpointIdSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_endpoint_id": typing.Union[ - ModelEndpointIdSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_endpoint_id = api_client.QueryParameter( - name="model_endpoint_id", - style=api_client.ParameterStyle.FORM, - schema=ModelEndpointIdSchema, - required=True, - explode=True, -) -# body param -SchemaForRequestBodyApplicationJson = SyncEndpointPredictV1Request - -request_body_sync_endpoint_predict_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = SyncEndpointPredictV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = 
schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _create_sync_inference_task_v1_sync_tasks_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Sync Inference Task - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_endpoint_id,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise 
exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_sync_endpoint_predict_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CreateSyncInferenceTaskV1SyncTasksPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_sync_inference_task_v1_sync_tasks_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_sync_inference_task_v1_sync_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_sync_inference_task_v1_sync_tasks_post_oapg( - body=body, - query_params=query_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_triggers/__init__.py b/launch/api_client/paths/v1_triggers/__init__.py index da4d5e4d..85662b08 100644 --- a/launch/api_client/paths/v1_triggers/__init__.py +++ b/launch/api_client/paths/v1_triggers/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_TRIGGERS +path = PathValues.V1_TRIGGERS \ No newline at end of 
file diff --git a/launch/api_client/paths/v1_triggers/get.py b/launch/api_client/paths/v1_triggers/get.py index 72e96bdd..e57b9ff3 100644 --- a/launch/api_client/paths/v1_triggers/get.py +++ b/launch/api_client/paths/v1_triggers/get.py @@ -29,7 +29,8 @@ from . import path _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListTriggersV1Response @@ -37,20 +38,25 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, + '200': _response_for_200, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -61,8 +67,9 @@ def _list_triggers_v1_triggers_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_triggers_v1_triggers_get_oapg( @@ -71,8 +78,7 @@ def _list_triggers_v1_triggers_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _list_triggers_v1_triggers_get_oapg( @@ -81,8 +87,10 @@ def _list_triggers_v1_triggers_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_triggers_v1_triggers_get_oapg( self, @@ -103,11 +111,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -124,7 +132,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -139,8 +151,9 @@ def list_triggers_v1_triggers_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_triggers_v1_triggers_get( @@ -149,8 +162,7 @@ def list_triggers_v1_triggers_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_triggers_v1_triggers_get( @@ -159,8 +171,10 @@ def list_triggers_v1_triggers_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_triggers_v1_triggers_get( self, @@ -173,7 +187,7 @@ def list_triggers_v1_triggers_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -187,8 +201,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -197,8 +212,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -207,8 +221,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -221,5 +237,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_triggers/get.pyi b/launch/api_client/paths/v1_triggers/get.pyi deleted file mode 100644 index 7789bcd1..00000000 --- a/launch/api_client/paths/v1_triggers/get.pyi +++ /dev/null @@ -1,193 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.list_triggers_v1_response import ( - ListTriggersV1Response, -) -from urllib3._collections import HTTPHeaderDict - -SchemaFor200ResponseBodyApplicationJson = ListTriggersV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_triggers_v1_triggers_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _list_triggers_v1_triggers_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _list_triggers_v1_triggers_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_triggers_v1_triggers_get_oapg( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Triggers - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: 
- raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListTriggersV1TriggersGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_triggers_v1_triggers_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_triggers_v1_triggers_get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_triggers_v1_triggers_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_triggers_v1_triggers_get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_triggers_v1_triggers_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_triggers_v1_triggers_get_oapg( - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_triggers/post.py b/launch/api_client/paths/v1_triggers/post.py index 82e45d1c..25de106c 100644 --- a/launch/api_client/paths/v1_triggers/post.py +++ b/launch/api_client/paths/v1_triggers/post.py @@ -38,12 +38,14 @@ request_body_create_trigger_v1_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateTriggerV1Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: 
schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_trigger_v1_triggers_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_trigger_v1_triggers_post_oapg( @@ -106,8 +117,10 @@ def _create_trigger_v1_triggers_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_trigger_v1_triggers_post_oapg( @@ -118,8 +131,7 @@ def _create_trigger_v1_triggers_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_trigger_v1_triggers_post_oapg( @@ -130,13 +142,15 @@ def _create_trigger_v1_triggers_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_trigger_v1_triggers_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_trigger_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_trigger_v1_triggers_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_trigger_v1_triggers_post( @@ -218,8 +236,10 @@ def create_trigger_v1_triggers_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_trigger_v1_triggers_post( @@ -230,8 +250,7 @@ def create_trigger_v1_triggers_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_trigger_v1_triggers_post( @@ -242,13 +261,15 @@ def create_trigger_v1_triggers_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_trigger_v1_triggers_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_trigger_v1_triggers_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_triggers/post.pyi b/launch/api_client/paths/v1_triggers/post.pyi deleted file mode 100644 index b5286049..00000000 --- a/launch/api_client/paths/v1_triggers/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from 
launch_client.model.create_trigger_v1_request import ( - CreateTriggerV1Request, -) -from launch_client.model.create_trigger_v1_response import ( - CreateTriggerV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateTriggerV1Request - -request_body_create_trigger_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateTriggerV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: 
typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_trigger_v1_triggers_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_trigger_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) 
- - return api_response - -class CreateTriggerV1TriggersPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def create_trigger_v1_triggers_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_trigger_v1_triggers_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_trigger_v1_triggers_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_triggers_trigger_id/__init__.py b/launch/api_client/paths/v1_triggers_trigger_id/__init__.py index 10b3f446..fbafad54 100644 --- a/launch/api_client/paths/v1_triggers_trigger_id/__init__.py +++ b/launch/api_client/paths/v1_triggers_trigger_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V1_TRIGGERS_TRIGGER_ID +path = PathValues.V1_TRIGGERS_TRIGGER_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_triggers_trigger_id/delete.py b/launch/api_client/paths/v1_triggers_trigger_id/delete.py index 4c2a5150..14680356 100644 --- a/launch/api_client/paths/v1_triggers_trigger_id/delete.py +++ b/launch/api_client/paths/v1_triggers_trigger_id/delete.py @@ -32,15 +32,17 @@ # Path params TriggerIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'trigger_id': typing.Union[TriggerIdSchema, str, ], + } +) 
+RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = DeleteTriggerV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": 
_response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _delete_trigger_v1_triggers_trigger_id_delete_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _delete_trigger_v1_triggers_trigger_id_delete_oapg( @@ -115,8 +127,7 @@ def _delete_trigger_v1_triggers_trigger_id_delete_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _delete_trigger_v1_triggers_trigger_id_delete_oapg( @@ -126,8 +137,10 @@ def _delete_trigger_v1_triggers_trigger_id_delete_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _delete_trigger_v1_triggers_trigger_id_delete_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_trigger_id,): + for parameter in ( + request_path_trigger_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="delete".upper(), + method='delete'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def delete_trigger_v1_triggers_trigger_id_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def delete_trigger_v1_triggers_trigger_id_delete( @@ -209,8 +229,7 @@ def delete_trigger_v1_triggers_trigger_id_delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def delete_trigger_v1_triggers_trigger_id_delete( @@ -220,8 +239,10 @@ def delete_trigger_v1_triggers_trigger_id_delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete_trigger_v1_triggers_trigger_id_delete( self, @@ -236,7 +257,7 @@ def delete_trigger_v1_triggers_trigger_id_delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def delete( @@ -262,8 +284,7 @@ def delete( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def delete( @@ -273,8 +294,10 @@ def delete( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def delete( self, @@ -289,5 +312,7 @@ def delete( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_triggers_trigger_id/delete.pyi b/launch/api_client/paths/v1_triggers_trigger_id/delete.pyi deleted file mode 100644 index a58d2e56..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/delete.pyi +++ /dev/null @@ -1,256 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.delete_trigger_v1_response import ( - DeleteTriggerV1Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -TriggerIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, 
RequestOptionalPathParams): - pass - -request_path_trigger_id = api_client.PathParameter( - name="trigger_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TriggerIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = DeleteTriggerV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _delete_trigger_v1_triggers_trigger_id_delete_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Delete Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_trigger_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - 
_headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="delete".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class DeleteTriggerV1TriggersTriggerIdDelete(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def delete_trigger_v1_triggers_trigger_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete_trigger_v1_triggers_trigger_id_delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def delete_trigger_v1_triggers_trigger_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def delete_trigger_v1_triggers_trigger_id_delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_trigger_v1_triggers_trigger_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiFordelete(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def delete( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def delete( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._delete_trigger_v1_triggers_trigger_id_delete_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_triggers_trigger_id/get.py b/launch/api_client/paths/v1_triggers_trigger_id/get.py index e231e060..e99f9008 100644 --- a/launch/api_client/paths/v1_triggers_trigger_id/get.py +++ b/launch/api_client/paths/v1_triggers_trigger_id/get.py @@ -32,15 +32,17 @@ # Path params TriggerIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'trigger_id': typing.Union[TriggerIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 +56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] 
SchemaFor200ResponseBodyApplicationJson = GetTriggerV1Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_trigger_v1_triggers_trigger_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _get_trigger_v1_triggers_trigger_id_get_oapg( @@ -115,8 +127,7 @@ def _get_trigger_v1_triggers_trigger_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_trigger_v1_triggers_trigger_id_get_oapg( @@ -126,8 +137,10 @@ def _get_trigger_v1_triggers_trigger_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _get_trigger_v1_triggers_trigger_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_trigger_id,): + for parameter in ( + request_path_trigger_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise 
exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_trigger_v1_triggers_trigger_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_trigger_v1_triggers_trigger_id_get( @@ -209,8 +229,7 @@ def get_trigger_v1_triggers_trigger_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_trigger_v1_triggers_trigger_id_get( @@ -220,8 +239,10 @@ def get_trigger_v1_triggers_trigger_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_trigger_v1_triggers_trigger_id_get( self, @@ -236,7 +257,7 @@ def get_trigger_v1_triggers_trigger_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_triggers_trigger_id/get.pyi b/launch/api_client/paths/v1_triggers_trigger_id/get.pyi deleted file mode 100644 index 84176c75..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/get.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_trigger_v1_response import GetTriggerV1Response -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# Path params -TriggerIdSchema = schemas.StrSchema -RequestRequiredPathParams = 
typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_trigger_id = api_client.PathParameter( - name="trigger_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TriggerIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = GetTriggerV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_trigger_v1_triggers_trigger_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_trigger_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - 
_headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetTriggerV1TriggersTriggerIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_trigger_v1_triggers_trigger_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_trigger_v1_triggers_trigger_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_trigger_v1_triggers_trigger_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_trigger_v1_triggers_trigger_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_trigger_v1_triggers_trigger_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_trigger_v1_triggers_trigger_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v1_triggers_trigger_id/put.py b/launch/api_client/paths/v1_triggers_trigger_id/put.py index 680997c8..80d7b839 100644 --- a/launch/api_client/paths/v1_triggers_trigger_id/put.py +++ b/launch/api_client/paths/v1_triggers_trigger_id/put.py @@ -35,15 +35,17 @@ # Path params TriggerIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'trigger_id': typing.Union[TriggerIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -62,12 +64,14 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): request_body_update_trigger_v1_request = api_client.RequestBody( content={ - "application/json": 
api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = UpdateTriggerV1Response @@ -75,14 +79,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -91,21 +98,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -119,8 +131,9 @@ def _update_trigger_v1_triggers_trigger_id_put_oapg( stream: bool = 
False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _update_trigger_v1_triggers_trigger_id_put_oapg( @@ -132,8 +145,10 @@ def _update_trigger_v1_triggers_trigger_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _update_trigger_v1_triggers_trigger_id_put_oapg( @@ -145,8 +160,7 @@ def _update_trigger_v1_triggers_trigger_id_put_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _update_trigger_v1_triggers_trigger_id_put_oapg( @@ -158,13 +172,15 @@ def _update_trigger_v1_triggers_trigger_id_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _update_trigger_v1_triggers_trigger_id_put_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -181,7 +197,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_trigger_id,): + for parameter in ( + request_path_trigger_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -189,29 +207,28 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_update_trigger_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='put'.upper(), headers=_headers, fields=_fields, body=_body, @@ -230,7 +247,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -248,8 +269,9 @@ def update_trigger_v1_triggers_trigger_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def update_trigger_v1_triggers_trigger_id_put( @@ -261,8 +283,10 @@ def update_trigger_v1_triggers_trigger_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def update_trigger_v1_triggers_trigger_id_put( @@ -274,8 +298,7 @@ def update_trigger_v1_triggers_trigger_id_put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def update_trigger_v1_triggers_trigger_id_put( @@ -287,13 +310,15 @@ def update_trigger_v1_triggers_trigger_id_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def update_trigger_v1_triggers_trigger_id_put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -307,7 +332,7 @@ def update_trigger_v1_triggers_trigger_id_put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -324,8 +349,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def put( @@ -337,8 +363,10 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def put( @@ -350,8 +378,7 @@ def put( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def put( @@ -363,13 +390,15 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def put( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -383,5 +412,7 @@ def put( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_triggers_trigger_id/put.pyi b/launch/api_client/paths/v1_triggers_trigger_id/put.pyi deleted file mode 100644 index 6e8fbc76..00000000 --- a/launch/api_client/paths/v1_triggers_trigger_id/put.pyi +++ /dev/null @@ -1,343 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from 
launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.update_trigger_v1_request import ( - UpdateTriggerV1Request, -) -from launch_client.model.update_trigger_v1_response import ( - UpdateTriggerV1Response, -) -from urllib3._collections import HTTPHeaderDict - -# Path params -TriggerIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "trigger_id": typing.Union[ - TriggerIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): - pass - -request_path_trigger_id = api_client.PathParameter( - name="trigger_id", - style=api_client.ParameterStyle.SIMPLE, - schema=TriggerIdSchema, - required=True, -) -# body param -SchemaForRequestBodyApplicationJson = UpdateTriggerV1Request - -request_body_update_trigger_v1_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = UpdateTriggerV1Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - 
response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _update_trigger_v1_triggers_trigger_id_put_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Update Trigger - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_trigger_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_update_trigger_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="put".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class UpdateTriggerV1TriggersTriggerIdPut(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def update_trigger_v1_triggers_trigger_id_put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_trigger_v1_triggers_trigger_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForput(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def put( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._update_trigger_v1_triggers_trigger_id_put_oapg( - body=body, - path_params=path_params, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v2_batch_completions/__init__.py b/launch/api_client/paths/v2_batch_completions/__init__.py new file mode 100644 index 00000000..3d318666 --- /dev/null +++ b/launch/api_client/paths/v2_batch_completions/__init__.py @@ -0,0 +1,7 @@ +# do not import all endpoints into this module because that uses a lot of memory and stack frames +# if you need the ability to import all 
endpoints from this module, import them with +# from launch.api_client.paths.v2_batch_completions import Api + +from launch.api_client.paths import PathValues + +path = PathValues.V2_BATCHCOMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_batch_completions/post.pyi b/launch/api_client/paths/v2_batch_completions/post.py similarity index 70% rename from launch/api_client/paths/v1_llm_batch_completions/post.pyi rename to launch/api_client/paths/v2_batch_completions/post.py index 0ac21685..3816de3d 100644 --- a/launch/api_client/paths/v1_llm_batch_completions/post.pyi +++ b/launch/api_client/paths/v2_batch_completions/post.py @@ -18,59 +18,83 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.create_batch_completions_request import ( - CreateBatchCompletionsRequest, -) -from launch_client.model.create_batch_completions_response import ( - CreateBatchCompletionsResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError from urllib3._collections import HTTPHeaderDict +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.batch_completions_job import BatchCompletionsJob +from launch.api_client.model.create_batch_completions_v2_request import ( + CreateBatchCompletionsV2Request, +) +from launch.api_client.model.http_validation_error import HTTPValidationError + +from . 
import path + # body param -SchemaForRequestBodyApplicationJson = CreateBatchCompletionsRequest +SchemaForRequestBodyApplicationJson = CreateBatchCompletionsV2Request -request_body_create_batch_completions_request = api_client.RequestBody( + +request_body_create_batch_completions_v2_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) -SchemaFor200ResponseBodyApplicationJson = CreateBatchCompletionsResponse +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] +SchemaFor200ResponseBodyApplicationJson = BatchCompletionsJob + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': 
_response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( + def _batch_completions_v2_batch_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -78,9 +102,12 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( + def _batch_completions_v2_batch_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -88,9 +115,13 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( + def _batch_completions_v2_batch_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -99,8 +130,9 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
+ @typing.overload - def _create_batch_completions_v1_llm_batch_completions_post_oapg( + def _batch_completions_v2_batch_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -108,18 +140,22 @@ def _create_batch_completions_v1_llm_batch_completions_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _create_batch_completions_v1_llm_batch_completions_post_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def _batch_completions_v2_batch_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ - Create Batch Completions + Batch Completions :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -130,23 +166,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None - serialized_data = request_body_create_batch_completions_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + serialized_data = request_body_create_batch_completions_v2_request.serialize(body, content_type) + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -165,15 +200,20 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class CreateBatchCompletionsV1LlmBatchCompletionsPost(BaseApi): + +class BatchCompletionsV2BatchCompletionsPost(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( + def batch_completions_v2_batch_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -181,9 +221,12 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( + def batch_completions_v2_batch_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -191,9 +234,13 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( + def batch_completions_v2_batch_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -202,8 +249,9 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def create_batch_completions_v1_llm_batch_completions_post( + def batch_completions_v2_batch_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -211,25 +259,30 @@ def create_batch_completions_v1_llm_batch_completions_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def create_batch_completions_v1_llm_batch_completions_post( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def batch_completions_v2_batch_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._create_batch_completions_v1_llm_batch_completions_post_oapg( + return self._batch_completions_v2_batch_completions_post_oapg( body=body, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @@ -242,7 +295,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( self, @@ -252,7 +308,11 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload def post( self, @@ -263,6 +323,7 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload def post( self, @@ -272,21 +333,27 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._create_batch_completions_v1_llm_batch_completions_post_oapg( + return self._batch_completions_v2_batch_completions_post_oapg( body=body, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id/__init__.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id/__init__.py new file mode 100644 index 00000000..58a7431f --- /dev/null +++ b/launch/api_client/paths/v2_batch_completions_batch_completion_id/__init__.py @@ -0,0 +1,7 @@ +# do not import all endpoints into this module because that uses a lot of memory and stack frames +# if you need the ability to import all endpoints from this module, import them with +# from launch.api_client.paths.v2_batch_completions_batch_completion_id import Api + +from launch.api_client.paths import PathValues + +path = PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID \ No newline at end of file diff --git a/launch/api_client/paths/v1_files_file_id_content/get.pyi b/launch/api_client/paths/v2_batch_completions_batch_completion_id/get.py similarity index 69% rename from launch/api_client/paths/v1_files_file_id_content/get.pyi rename to launch/api_client/paths/v2_batch_completions_batch_completion_id/get.py index 0fc0bb7d..20cc744e 100644 --- a/launch/api_client/paths/v1_files_file_id_content/get.pyi +++ 
b/launch/api_client/paths/v2_batch_completions_batch_completion_id/get.py @@ -18,78 +18,109 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.get_file_content_response import ( - GetFileContentResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError from urllib3._collections import HTTPHeaderDict +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.get_batch_completion_v2_response import ( + GetBatchCompletionV2Response, +) +from launch.api_client.model.http_validation_error import HTTPValidationError + +from . import path + # Path params -FileIdSchema = schemas.StrSchema +BatchCompletionIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_completion_id': typing.Union[BatchCompletionIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "file_id": typing.Union[ - FileIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) + class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): pass -request_path_file_id = api_client.PathParameter( - name="file_id", + +request_path_batch_completion_id = api_client.PathParameter( + name="batch_completion_id", style=api_client.ParameterStyle.SIMPLE, - schema=FileIdSchema, + schema=BatchCompletionIdSchema, required=True, ) -SchemaFor200ResponseBodyApplicationJson = GetFileContentResponse +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] +SchemaFor200ResponseBodyApplicationJson = GetBatchCompletionV2Response + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: 
urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': _response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _get_file_content_v1_files_file_id_content_get_oapg( + def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload - def _get_file_content_v1_files_file_id_content_get_oapg( + def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -97,16 +128,21 @@ def _get_file_content_v1_files_file_id_content_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def _get_file_content_v1_files_file_id_content_get_oapg( + def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_file_content_v1_files_file_id_content_get_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def _get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -115,7 +151,7 @@ def _get_file_content_v1_files_file_id_content_get_oapg( skip_deserialization: bool = False, ): """ - Get File Content + Get Batch Completion :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -124,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_file_id,): + for parameter in ( + request_path_batch_completion_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -132,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -159,24 +197,32 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class GetFileContentV1FilesFileIdContentGet(BaseApi): + +class GetBatchCompletionV2BatchCompletionsBatchCompletionIdGet(BaseApi): # this class is used by api classes 
that refer to endpoints with operationId fn names @typing.overload - def get_file_content_v1_files_file_id_content_get( + def get_batch_completion_v2_batch_completions_batch_completion_id_get( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def get_file_content_v1_files_file_id_content_get( + def get_batch_completion_v2_batch_completions_batch_completion_id_get( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -184,16 +230,21 @@ def get_file_content_v1_files_file_id_content_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def get_file_content_v1_files_file_id_content_get( + def get_batch_completion_v2_batch_completions_batch_completion_id_get( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_file_content_v1_files_file_id_content_get( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def get_batch_completion_v2_batch_completions_batch_completion_id_get( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -201,14 +252,15 @@ def get_file_content_v1_files_file_id_content_get( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._get_file_content_v1_files_file_id_content_get_oapg( + return self._get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( path_params=path_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + class ApiForget(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @@ -220,7 +272,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def get( self, @@ -230,6 +285,7 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload def get( self, @@ -238,7 +294,11 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ def get( self, path_params: RequestPathParams = frozendict.frozendict(), @@ -247,10 +307,12 @@ def get( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._get_file_content_v1_files_file_id_content_get_oapg( + return self._get_batch_completion_v2_batch_completions_batch_completion_id_get_oapg( path_params=path_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.pyi b/launch/api_client/paths/v2_batch_completions_batch_completion_id/post.py similarity index 68% rename from launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.pyi rename to launch/api_client/paths/v2_batch_completions_batch_completion_id/post.py index b10aa83e..9617b9e3 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints_model_endpoint_name/put.pyi +++ b/launch/api_client/paths/v2_batch_completions_batch_completion_id/post.py @@ -18,81 +18,111 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.update_llm_model_endpoint_v1_request import ( - UpdateLLMModelEndpointV1Request, +from urllib3._collections import HTTPHeaderDict + +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.http_validation_error import HTTPValidationError +from launch.api_client.model.update_batch_completions_v2_request import ( + UpdateBatchCompletionsV2Request, ) -from launch_client.model.update_llm_model_endpoint_v1_response import ( - UpdateLLMModelEndpointV1Response, +from 
launch.api_client.model.update_batch_completions_v2_response import ( + UpdateBatchCompletionsV2Response, ) -from urllib3._collections import HTTPHeaderDict + +from . import path # Path params -ModelEndpointNameSchema = schemas.StrSchema +BatchCompletionIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_completion_id': typing.Union[BatchCompletionIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_endpoint_name": typing.Union[ - ModelEndpointNameSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) + class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): pass -request_path_model_endpoint_name = api_client.PathParameter( - name="model_endpoint_name", + +request_path_batch_completion_id = api_client.PathParameter( + name="batch_completion_id", style=api_client.ParameterStyle.SIMPLE, - schema=ModelEndpointNameSchema, + schema=BatchCompletionIdSchema, required=True, ) # body param -SchemaForRequestBodyApplicationJson = UpdateLLMModelEndpointV1Request +SchemaForRequestBodyApplicationJson = UpdateBatchCompletionsV2Request + -request_body_update_llm_model_endpoint_v1_request = api_client.RequestBody( +request_body_update_batch_completions_v2_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) -SchemaFor200ResponseBodyApplicationJson = UpdateLLMModelEndpointV1Response +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] +SchemaFor200ResponseBodyApplicationJson = UpdateBatchCompletionsV2Response + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': _response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -101,9 +131,12 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -112,9 +145,13 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -124,8 +161,9 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -134,11 +172,15 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def _update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, @@ -146,7 +188,7 @@ def _update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( skip_deserialization: bool = False, ): """ - Update Model Endpoint + Update Batch Completion :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -155,7 +197,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_endpoint_name,): + for parameter in ( + request_path_batch_completion_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -163,29 +207,28 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None - serialized_data = request_body_update_llm_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + serialized_data = request_body_update_batch_completions_v2_request.serialize(body, content_type) + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -204,15 +247,20 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class UpdateModelEndpointV1LlmModelEndpointsModelEndpointNamePut(BaseApi): + +class UpdateBatchCompletionV2BatchCompletionsBatchCompletionIdPost(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( + def update_batch_completion_v2_batch_completions_batch_completion_id_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -221,9 +269,12 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> 
typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( + def update_batch_completion_v2_batch_completions_batch_completion_id_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -232,9 +283,13 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( + def update_batch_completion_v2_batch_completions_batch_completion_id_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -244,8 +299,9 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( + def update_batch_completion_v2_batch_completions_batch_completion_id_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -254,32 +310,37 @@ def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def update_batch_completion_v2_batch_completions_batch_completion_id_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + return self._update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( body=body, path_params=path_params, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) -class ApiForput(BaseApi): + +class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload - def put( + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -288,9 +349,12 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def put( + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -299,9 +363,13 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ + @typing.overload - def put( + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -311,8 +379,9 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def put( + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -321,23 +390,29 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def put( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._update_model_endpoint_v1_llm_model_endpoints_model_endpoint_name_put_oapg( + return self._update_batch_completion_v2_batch_completions_batch_completion_id_post_oapg( body=body, path_params=path_params, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/__init__.py b/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/__init__.py new file mode 100644 index 00000000..f5d04293 --- /dev/null +++ 
b/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/__init__.py @@ -0,0 +1,7 @@ +# do not import all endpoints into this module because that uses a lot of memory and stack frames +# if you need the ability to import all endpoints from this module, import them with +# from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel import Api + +from launch.api_client.paths import PathValues + +path = PathValues.V2_BATCHCOMPLETIONS_BATCH_COMPLETION_ID_ACTIONS_CANCEL \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.pyi b/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/post.py similarity index 67% rename from launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.pyi rename to launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/post.py index b11cd36d..f875a254 100644 --- a/launch/api_client/paths/v1_llm_fine_tunes_fine_tune_id_cancel/put.pyi +++ b/launch/api_client/paths/v2_batch_completions_batch_completion_id_actions_cancel/post.py @@ -18,78 +18,109 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.cancel_fine_tune_response import ( - CancelFineTuneResponse, -) -from launch_client.model.http_validation_error import HTTPValidationError from urllib3._collections import HTTPHeaderDict +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.cancel_batch_completions_v2_response import ( + CancelBatchCompletionsV2Response, +) +from launch.api_client.model.http_validation_error import HTTPValidationError + +from . 
import path + # Path params -FineTuneIdSchema = schemas.StrSchema +BatchCompletionIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'batch_completion_id': typing.Union[BatchCompletionIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "fine_tune_id": typing.Union[ - FineTuneIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) + class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): pass -request_path_fine_tune_id = api_client.PathParameter( - name="fine_tune_id", + +request_path_batch_completion_id = api_client.PathParameter( + name="batch_completion_id", style=api_client.ParameterStyle.SIMPLE, - schema=FineTuneIdSchema, + schema=BatchCompletionIdSchema, required=True, ) -SchemaFor200ResponseBodyApplicationJson = CancelFineTuneResponse +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] +SchemaFor200ResponseBodyApplicationJson = CancelBatchCompletionsV2Response + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] 
headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': _response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( + def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( + def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -97,16 +128,21 @@ def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
+ @typing.overload - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( + def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def _cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -115,7 +151,7 @@ def _cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( skip_deserialization: bool = False, ): """ - Cancel Fine Tune + Cancel Batch Completion :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -124,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_fine_tune_id,): + for parameter in ( + request_path_batch_completion_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -132,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', 
accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="put".upper(), + method='post'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -159,24 +197,32 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class CancelFineTuneV1LlmFineTunesFineTuneIdCancelPut(BaseApi): + +class CancelBatchCompletionV2BatchCompletionsBatchCompletionIdActionsCancelPost(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( + def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( + def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -184,16 +230,21 @@ def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
+ @typing.overload - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( + def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -201,28 +252,32 @@ def cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( + return self._cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( path_params=path_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) -class ApiForput(BaseApi): + +class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @typing.overload - def put( + def post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def put( + def post( self, skip_deserialization: typing_extensions.Literal[True], path_params: RequestPathParams = frozendict.frozendict(), @@ -230,16 +285,21 @@ def put( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def put( + def post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def put( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def post( self, path_params: RequestPathParams = frozendict.frozendict(), accept_content_types: typing.Tuple[str] = _all_accept_content_types, @@ -247,10 +307,12 @@ def put( timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._cancel_fine_tune_v1_llm_fine_tunes_fine_tune_id_cancel_put_oapg( + return self._cancel_batch_completion_v2_batch_completions_batch_completion_id_actions_cancel_post_oapg( path_params=path_params, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_chat_completions/__init__.py b/launch/api_client/paths/v2_chat_completions/__init__.py new file mode 100644 index 00000000..1d410511 --- /dev/null +++ b/launch/api_client/paths/v2_chat_completions/__init__.py @@ -0,0 +1,7 @@ +# do not import all endpoints into this module because that uses a lot of memory and stack frames +# if you need the ability to import all endpoints from this module, import them with 
+# from launch.api_client.paths.v2_chat_completions import Api + +from launch.api_client.paths import PathValues + +path = PathValues.V2_CHAT_COMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v1_model_bundles/post.pyi b/launch/api_client/paths/v2_chat_completions/post.py similarity index 62% rename from launch/api_client/paths/v1_model_bundles/post.pyi rename to launch/api_client/paths/v2_chat_completions/post.py index 780b6144..7b4f0b21 100644 --- a/launch/api_client/paths/v1_model_bundles/post.pyi +++ b/launch/api_client/paths/v2_chat_completions/post.py @@ -18,59 +18,128 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.create_model_bundle_v1_request import ( - CreateModelBundleV1Request, +from urllib3._collections import HTTPHeaderDict + +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.chat_completion_v2_request import ( + ChatCompletionV2Request, ) -from launch_client.model.create_model_bundle_v1_response import ( - CreateModelBundleV1Response, +from launch.api_client.model.chat_completion_v2_stream_error_chunk import ( + ChatCompletionV2StreamErrorChunk, ) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict +from launch.api_client.model.create_chat_completion_response import ( + CreateChatCompletionResponse, +) +from launch.api_client.model.create_chat_completion_stream_response import ( + CreateChatCompletionStreamResponse, +) +from launch.api_client.model.http_validation_error import HTTPValidationError + +from . 
import path # body param -SchemaForRequestBodyApplicationJson = CreateModelBundleV1Request +SchemaForRequestBodyApplicationJson = ChatCompletionV2Request -request_body_create_model_bundle_v1_request = api_client.RequestBody( + +request_body_chat_completion_v2_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) -SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV1Response +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] + + +class SchemaFor200ResponseBodyApplicationJson( + schemas.ComposedSchema, +): + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + CreateChatCompletionResponse, + CreateChatCompletionStreamResponse, + ChatCompletionV2StreamErrorChunk, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'SchemaFor200ResponseBodyApplicationJson': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': _response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( + def _chat_completion_v2_chat_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -78,9 +147,12 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( + def _chat_completion_v2_chat_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -88,9 +160,13 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( + def _chat_completion_v2_chat_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -99,8 +175,9 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def _create_model_bundle_v1_model_bundles_post_oapg( + def _chat_completion_v2_chat_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -108,18 +185,22 @@ def _create_model_bundle_v1_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _create_model_bundle_v1_model_bundles_post_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def _chat_completion_v2_chat_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ - Create Model Bundle + Chat Completion :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -130,23 +211,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None - serialized_data = request_body_create_model_bundle_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + serialized_data = request_body_chat_completion_v2_request.serialize(body, content_type) + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -165,15 +245,20 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class CreateModelBundleV1ModelBundlesPost(BaseApi): + +class ChatCompletionV2ChatCompletionsPost(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload - def create_model_bundle_v1_model_bundles_post( + def chat_completion_v2_chat_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -181,9 +266,12 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload - def create_model_bundle_v1_model_bundles_post( + def chat_completion_v2_chat_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -191,9 +279,13 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def create_model_bundle_v1_model_bundles_post( + def chat_completion_v2_chat_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -202,8 +294,9 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def create_model_bundle_v1_model_bundles_post( + def chat_completion_v2_chat_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -211,25 +304,30 @@ def create_model_bundle_v1_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def create_model_bundle_v1_model_bundles_post( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def chat_completion_v2_chat_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._create_model_bundle_v1_model_bundles_post_oapg( + return self._chat_completion_v2_chat_completions_post_oapg( body=body, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @@ -242,7 +340,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( self, @@ -252,7 +353,11 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload def post( self, @@ -263,6 +368,7 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload def post( self, @@ -272,21 +378,27 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._create_model_bundle_v1_model_bundles_post_oapg( + return self._chat_completion_v2_chat_completions_post_oapg( body=body, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_completions/__init__.py b/launch/api_client/paths/v2_completions/__init__.py new file mode 100644 index 00000000..9b8209ec --- /dev/null +++ b/launch/api_client/paths/v2_completions/__init__.py @@ -0,0 +1,7 @@ +# do not import all endpoints into this module because that uses a lot of memory and stack frames +# if you need the ability to import all endpoints from this module, import them with +# from launch.api_client.paths.v2_completions import Api + +from launch.api_client.paths import PathValues + +path = PathValues.V2_COMPLETIONS \ No newline at end of file diff --git a/launch/api_client/paths/v1_llm_model_endpoints/post.pyi b/launch/api_client/paths/v2_completions/post.py similarity index 64% rename from launch/api_client/paths/v1_llm_model_endpoints/post.pyi rename to launch/api_client/paths/v2_completions/post.py index 15304381..1aa541d3 100644 --- a/launch/api_client/paths/v1_llm_model_endpoints/post.pyi +++ b/launch/api_client/paths/v2_completions/post.py @@ -18,59 +18,122 @@ import frozendict # noqa: F401 import typing_extensions # noqa: F401 import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, 
exceptions -from launch_client.model.create_llm_model_endpoint_v1_request import ( - CreateLLMModelEndpointV1Request, +from urllib3._collections import HTTPHeaderDict + +from launch.api_client import schemas # noqa: F401 +from launch.api_client import api_client, exceptions +from launch.api_client.model.completion_v2_request import CompletionV2Request +from launch.api_client.model.completion_v2_stream_error_chunk import ( + CompletionV2StreamErrorChunk, ) -from launch_client.model.create_llm_model_endpoint_v1_response import ( - CreateLLMModelEndpointV1Response, +from launch.api_client.model.create_completion_response import ( + CreateCompletionResponse, ) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict +from launch.api_client.model.http_validation_error import HTTPValidationError + +from . import path # body param -SchemaForRequestBodyApplicationJson = CreateLLMModelEndpointV1Request +SchemaForRequestBodyApplicationJson = CompletionV2Request + -request_body_create_llm_model_endpoint_v1_request = api_client.RequestBody( +request_body_completion_v2_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) -SchemaFor200ResponseBodyApplicationJson = CreateLLMModelEndpointV1Response +_auth = [ + 'OAuth2PasswordBearer', + 'HTTPBasic', +] + + +class SchemaFor200ResponseBodyApplicationJson( + schemas.ComposedSchema, +): + + + class MetaOapg: + + @classmethod + @functools.lru_cache() + def any_of(cls): + # we need this here to make our import statements work + # we must store _composed_schemas in here so the code is only run + # when we invoke this method. 
If we kept this at the class + # level we would get an error because the class level + # code would be run when this module is imported, and these composed + # classes don't exist yet because their module has not finished + # loading + return [ + CreateCompletionResponse, + CompletionV2StreamErrorChunk, + ] + + + def __new__( + cls, + *_args: typing.Union[dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, bool, None, list, tuple, bytes, io.FileIO, io.BufferedReader, ], + _configuration: typing.Optional[schemas.Configuration] = None, + **kwargs: typing.Union[schemas.AnyTypeSchema, dict, frozendict.frozendict, str, date, datetime, uuid.UUID, int, float, decimal.Decimal, None, list, tuple, bytes], + ) -> 'SchemaFor200ResponseBodyApplicationJson': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + **kwargs, + ) + @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError + @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset + _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + 
schema=SchemaFor422ResponseBodyApplicationJson), }, ) -_all_accept_content_types = ("application/json",) +_status_code_to_response = { + '200': _response_for_200, + '422': _response_for_422, +} +_all_accept_content_types = ( + 'application/json', +) + class BaseApi(api_client.Api): @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( + def _completion_v2_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -78,9 +141,12 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( + def _completion_v2_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -88,9 +154,13 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( + def _completion_v2_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -99,8 +169,9 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... 
+ @typing.overload - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( + def _completion_v2_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -108,18 +179,22 @@ def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _create_model_endpoint_v1_llm_model_endpoints_post_oapg( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + + def _completion_v2_completions_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): """ - Create Model Endpoint + Completion :param skip_deserialization: If true then api_response.response will be set but api_response.body and api_response.headers will not be deserialized into schema class instances @@ -130,23 +205,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None - serialized_data = request_body_create_llm_model_endpoint_v1_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + serialized_data = request_body_completion_v2_request.serialize(body, content_type) + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -165,15 +239,20 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response -class CreateModelEndpointV1LlmModelEndpointsPost(BaseApi): + +class CompletionV2CompletionsPost(BaseApi): # this class is used by api classes that refer to endpoints with operationId fn names @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( + def completion_v2_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: typing_extensions.Literal["application/json"] = ..., @@ -181,9 +260,12 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( + def completion_v2_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -191,9 +273,13 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( + def completion_v2_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], skip_deserialization: typing_extensions.Literal[True], @@ -202,8 +288,9 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload - def create_model_endpoint_v1_llm_model_endpoints_post( + def completion_v2_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], content_type: str = ..., @@ -211,25 +298,30 @@ def create_model_endpoint_v1_llm_model_endpoints_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def create_model_endpoint_v1_llm_model_endpoints_post( + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
+ + def completion_v2_completions_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._create_model_endpoint_v1_llm_model_endpoints_post_oapg( + return self._completion_v2_completions_post_oapg( body=body, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + class ApiForpost(BaseApi): # this class is used by api classes that refer to endpoints by path and http method names @@ -242,7 +334,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( self, @@ -252,7 +347,11 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + + @typing.overload def post( self, @@ -263,6 +362,7 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, ) -> api_client.ApiResponseWithoutDeserialization: ... + @typing.overload def post( self, @@ -272,21 +372,27 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... + def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = False, ): - return self._create_model_endpoint_v1_llm_model_endpoints_post_oapg( + return self._completion_v2_completions_post_oapg( body=body, content_type=content_type, accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_model_bundles/__init__.py b/launch/api_client/paths/v2_model_bundles/__init__.py index 4de08029..a34ff03c 100644 --- a/launch/api_client/paths/v2_model_bundles/__init__.py +++ b/launch/api_client/paths/v2_model_bundles/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V2_MODELBUNDLES +path = PathValues.V2_MODELBUNDLES \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles/get.py b/launch/api_client/paths/v2_model_bundles/get.py index e9608072..61b55b48 100644 --- a/launch/api_client/paths/v2_model_bundles/get.py +++ b/launch/api_client/paths/v2_model_bundles/get.py @@ -31,19 +31,39 @@ from . 
import path # Query params -ModelNameSchema = schemas.StrSchema + + +class ModelNameSchema( + schemas.StrBase, + schemas.NoneBase, + schemas.Schema, + schemas.NoneStrMixin +): + + + def __new__( + cls, + *_args: typing.Union[None, str, ], + _configuration: typing.Optional[schemas.Configuration] = None, + ) -> 'ModelNameSchema': + return super().__new__( + cls, + *_args, + _configuration=_configuration, + ) OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) +RequestRequiredQueryParams = typing_extensions.TypedDict( + 'RequestRequiredQueryParams', + { + } +) RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", + 'RequestOptionalQueryParams', { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], + 'model_name': typing.Union[ModelNameSchema, None, str, ], + 'order_by': typing.Union[OrderBySchema, ], }, - total=False, + total=False ) @@ -64,7 +84,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ListModelBundlesV2Response @@ -72,14 +93,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -88,21 +112,26 @@ class 
ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -114,8 +143,9 @@ def _list_model_bundles_v2_model_bundles_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _list_model_bundles_v2_model_bundles_get_oapg( @@ -125,8 +155,7 @@ def _list_model_bundles_v2_model_bundles_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _list_model_bundles_v2_model_bundles_get_oapg( @@ -136,8 +165,10 @@ def _list_model_bundles_v2_model_bundles_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _list_model_bundles_v2_model_bundles_get_oapg( self, @@ -174,11 +205,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -195,7 +226,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -211,8 +246,9 @@ def list_model_bundles_v2_model_bundles_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def list_model_bundles_v2_model_bundles_get( @@ -222,8 +258,7 @@ def list_model_bundles_v2_model_bundles_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def list_model_bundles_v2_model_bundles_get( @@ -233,8 +268,10 @@ def list_model_bundles_v2_model_bundles_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def list_model_bundles_v2_model_bundles_get( self, @@ -249,7 +286,7 @@ def list_model_bundles_v2_model_bundles_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -264,8 +301,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -275,8 +313,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -286,8 +323,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -302,5 +341,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_model_bundles/get.pyi b/launch/api_client/paths/v2_model_bundles/get.pyi deleted file mode 100644 index 662b5c99..00000000 --- a/launch/api_client/paths/v2_model_bundles/get.pyi +++ /dev/null @@ -1,269 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.list_model_bundles_v2_response import ( - ListModelBundlesV2Response, -) -from launch_client.model.model_bundle_order_by import ModelBundleOrderBy -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelNameSchema = schemas.StrSchema -OrderBySchema = ModelBundleOrderBy -RequestRequiredQueryParams = typing_extensions.TypedDict("RequestRequiredQueryParams", {}) -RequestOptionalQueryParams = typing_extensions.TypedDict( - "RequestOptionalQueryParams", - { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], - "order_by": typing.Union[OrderBySchema,], - }, - total=False, -) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - explode=True, -) -request_query_order_by = api_client.QueryParameter( - 
name="order_by", - style=api_client.ParameterStyle.FORM, - schema=OrderBySchema, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ListModelBundlesV2Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _list_model_bundles_v2_model_bundles_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - List Model Bundles - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in ( - request_query_model_name, - request_query_order_by, - ): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class ListModelBundlesV2ModelBundlesGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def list_model_bundles_v2_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def list_model_bundles_v2_model_bundles_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def list_model_bundles_v2_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def list_model_bundles_v2_model_bundles_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v2_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._list_model_bundles_v2_model_bundles_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v2_model_bundles/post.py b/launch/api_client/paths/v2_model_bundles/post.py index 75a2ecd2..995ad152 100644 --- a/launch/api_client/paths/v2_model_bundles/post.py +++ b/launch/api_client/paths/v2_model_bundles/post.py @@ -38,12 +38,14 @@ request_body_create_model_bundle_v2_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV2Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _create_model_bundle_v2_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _create_model_bundle_v2_model_bundles_post_oapg( @@ -106,8 +117,10 @@ def _create_model_bundle_v2_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _create_model_bundle_v2_model_bundles_post_oapg( @@ -118,8 +131,7 @@ def _create_model_bundle_v2_model_bundles_post_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def _create_model_bundle_v2_model_bundles_post_oapg( @@ -130,13 +142,15 @@ def _create_model_bundle_v2_model_bundles_post_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def _create_model_bundle_v2_model_bundles_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. 
Set a valid value instead') _fields = None _body = None serialized_data = request_body_create_model_bundle_v2_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def create_model_bundle_v2_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def create_model_bundle_v2_model_bundles_post( @@ -218,8 +236,10 @@ def create_model_bundle_v2_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
+ @typing.overload def create_model_bundle_v2_model_bundles_post( @@ -230,8 +250,7 @@ def create_model_bundle_v2_model_bundles_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def create_model_bundle_v2_model_bundles_post( @@ -242,13 +261,15 @@ def create_model_bundle_v2_model_bundles_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def create_model_bundle_v2_model_bundles_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def create_model_bundle_v2_model_bundles_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... 
+ ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_model_bundles/post.pyi b/launch/api_client/paths/v2_model_bundles/post.pyi deleted file mode 100644 index d0eaf62f..00000000 --- a/launch/api_client/paths/v2_model_bundles/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from 
launch_client import api_client, exceptions -from launch_client.model.create_model_bundle_v2_request import ( - CreateModelBundleV2Request, -) -from launch_client.model.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CreateModelBundleV2Request - -request_body_create_model_bundle_v2_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV2Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: 
typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def _create_model_bundle_v2_model_bundles_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Create Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_create_model_bundle_v2_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, 
api_response=api_response) - - return api_response - -class CreateModelBundleV2ModelBundlesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def create_model_bundle_v2_model_bundles_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_bundle_v2_model_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._create_model_bundle_v2_model_bundles_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py b/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py index 1a69800e..ac407cd6 100644 --- a/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py +++ b/launch/api_client/paths/v2_model_bundles_clone_with_changes/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES +path = PathValues.V2_MODELBUNDLES_CLONEWITHCHANGES \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py b/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py index 59c29b33..e09e518a 100644 --- a/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py +++ b/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.py @@ -38,12 +38,14 @@ request_body_clone_model_bundle_v2_request = api_client.RequestBody( content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaForRequestBodyApplicationJson), }, required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = CreateModelBundleV2Response @@ -51,14 +53,17 @@ @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -67,21 +72,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -94,8 +104,9 @@ def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oa stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( @@ -106,8 +117,10 @@ def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oa stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( @@ -118,8 +131,7 @@ def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oa accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( @@ -130,13 +142,15 @@ def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oa stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -154,23 +168,22 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) if body is schemas.unset: raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. Set a valid value instead" - ) + 'The required body parameter has an invalid value of: unset. Set a valid value instead') _fields = None _body = None serialized_data = request_body_clone_model_bundle_v2_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] + _headers.add('Content-Type', content_type) + if 'fields' in serialized_data: + _fields = serialized_data['fields'] + elif 'body' in serialized_data: + _body = serialized_data['body'] response = self.api_client.call_api( resource_path=used_path, - method="post".upper(), + method='post'.upper(), headers=_headers, fields=_fields, body=_body, @@ -189,7 +202,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -206,8 +223,9 @@ def 
clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( @@ -218,8 +236,10 @@ def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( @@ -230,8 +250,7 @@ def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( @@ -242,13 +261,15 @@ def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -260,7 +281,7 @@ def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -276,8 +297,9 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def post( @@ -288,8 +310,10 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... + @typing.overload def post( @@ -300,8 +324,7 @@ def post( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def post( @@ -312,13 +335,15 @@ def post( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def post( self, body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", + content_type: str = 'application/json', accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, @@ -330,5 +355,7 @@ def post( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.pyi b/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.pyi deleted file mode 100644 index 498ec820..00000000 --- a/launch/api_client/paths/v2_model_bundles_clone_with_changes/post.pyi +++ /dev/null @@ -1,292 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.clone_model_bundle_v2_request import ( - CloneModelBundleV2Request, -) -from launch_client.model.create_model_bundle_v2_response import ( - CreateModelBundleV2Response, -) -from launch_client.model.http_validation_error import HTTPValidationError -from urllib3._collections import HTTPHeaderDict - -# body param -SchemaForRequestBodyApplicationJson = CloneModelBundleV2Request - -request_body_clone_model_bundle_v2_request = api_client.RequestBody( - content={ - "application/json": api_client.MediaType(schema=SchemaForRequestBodyApplicationJson), - }, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = 
CreateModelBundleV2Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Clone Model Bundle With Changes - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - used_path = path.value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - if body is schemas.unset: - raise exceptions.ApiValueError( - "The required body parameter has an invalid value of: unset. 
Set a valid value instead" - ) - _fields = None - _body = None - serialized_data = request_body_clone_model_bundle_v2_request.serialize(body, content_type) - _headers.add("Content-Type", content_type) - if "fields" in serialized_data: - _fields = serialized_data["fields"] - elif "body" in serialized_data: - _body = serialized_data["body"] - response = self.api_client.call_api( - resource_path=used_path, - method="post".upper(), - headers=_headers, - fields=_fields, - body=_body, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class CloneModelBundleWithChangesV2ModelBundlesCloneWithChangesPost(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForpost(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: typing_extensions.Literal["application/json"] = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - skip_deserialization: typing_extensions.Literal[True], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = ..., - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def post( - self, - body: typing.Union[SchemaForRequestBodyApplicationJson,], - content_type: str = "application/json", - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post_oapg( - body=body, - content_type=content_type, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v2_model_bundles_latest/__init__.py b/launch/api_client/paths/v2_model_bundles_latest/__init__.py index b8ce06cc..190c18d6 100644 --- a/launch/api_client/paths/v2_model_bundles_latest/__init__.py +++ b/launch/api_client/paths/v2_model_bundles_latest/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V2_MODELBUNDLES_LATEST +path = PathValues.V2_MODELBUNDLES_LATEST \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles_latest/get.py 
b/launch/api_client/paths/v2_model_bundles_latest/get.py index bcb75db6..54ee64b1 100644 --- a/launch/api_client/paths/v2_model_bundles_latest/get.py +++ b/launch/api_client/paths/v2_model_bundles_latest/get.py @@ -32,15 +32,17 @@ # Query params ModelNameSchema = schemas.StrSchema RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", + 'RequestRequiredQueryParams', + { + 'model_name': typing.Union[ModelNameSchema, str, ], + } +) +RequestOptionalQueryParams = typing_extensions.TypedDict( + 'RequestOptionalQueryParams', { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], }, + total=False ) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): @@ -55,7 +57,8 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) explode=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ModelBundleV2Response @@ -63,14 +66,17 @@ class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams) @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -79,21 +85,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -105,8 +116,9 @@ def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( @@ -116,8 +128,7 @@ def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( @@ -127,8 +138,10 @@ def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( self, @@ -148,7 +161,9 @@ class instances used_path = path.value prefix_separator_iterator = None - for parameter in (request_query_model_name,): + for parameter in ( + request_query_model_name, + ): parameter_data = query_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -162,11 +177,11 @@ class instances # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -183,7 +198,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -199,8 +218,9 @@ def get_latest_model_bundle_v2_model_bundles_latest_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get_latest_model_bundle_v2_model_bundles_latest_get( @@ -210,8 +230,7 @@ def get_latest_model_bundle_v2_model_bundles_latest_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get_latest_model_bundle_v2_model_bundles_latest_get( @@ -221,8 +240,10 @@ def get_latest_model_bundle_v2_model_bundles_latest_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_latest_model_bundle_v2_model_bundles_latest_get( self, @@ -237,7 +258,7 @@ def get_latest_model_bundle_v2_model_bundles_latest_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -252,8 +273,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -263,8 +285,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get( @@ -274,8 +295,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def get( self, @@ -290,5 +313,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_model_bundles_latest/get.pyi b/launch/api_client/paths/v2_model_bundles_latest/get.pyi deleted file mode 100644 index edd6dda8..00000000 --- a/launch/api_client/paths/v2_model_bundles_latest/get.pyi +++ /dev/null @@ -1,255 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.model_bundle_v2_response import ModelBundleV2Response -from urllib3._collections import HTTPHeaderDict - -# Query params -ModelNameSchema = schemas.StrSchema -RequestRequiredQueryParams = typing_extensions.TypedDict( - "RequestRequiredQueryParams", - { - "model_name": typing.Union[ - ModelNameSchema, - str, - ], - }, -) -RequestOptionalQueryParams = typing_extensions.TypedDict("RequestOptionalQueryParams", {}, total=False) - -class RequestQueryParams(RequestRequiredQueryParams, RequestOptionalQueryParams): - pass - -request_query_model_name = api_client.QueryParameter( - name="model_name", - style=api_client.ParameterStyle.FORM, - schema=ModelNameSchema, - required=True, - explode=True, -) -SchemaFor200ResponseBodyApplicationJson = ModelBundleV2Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: 
typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Latest Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestQueryParams, query_params) - used_path = path.value - - prefix_separator_iterator = None - for parameter in (request_query_model_name,): - parameter_data = query_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - if prefix_separator_iterator is None: - prefix_separator_iterator = parameter.get_prefix_separator_iterator() - serialized_data = parameter.serialize(parameter_data, prefix_separator_iterator) - for serialized_value in serialized_data.values(): - used_path += serialized_value - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = 
api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetLatestModelBundleV2ModelBundlesLatestGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get_latest_model_bundle_v2_model_bundles_latest_get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... 
- def get( - self, - query_params: RequestQueryParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_latest_model_bundle_v2_model_bundles_latest_get_oapg( - query_params=query_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py b/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py index df3c2a79..783f2147 100644 --- a/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py +++ b/launch/api_client/paths/v2_model_bundles_model_bundle_id/__init__.py @@ -4,4 +4,4 @@ from launch.api_client.paths import PathValues -path = PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID +path = PathValues.V2_MODELBUNDLES_MODEL_BUNDLE_ID \ No newline at end of file diff --git a/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py b/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py index 729ad8c1..91c9f870 100644 --- a/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py +++ b/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.py @@ -32,15 +32,17 @@ # Path params ModelBundleIdSchema = schemas.StrSchema RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", + 'RequestRequiredPathParams', + { + 'model_bundle_id': typing.Union[ModelBundleIdSchema, str, ], + } +) +RequestOptionalPathParams = typing_extensions.TypedDict( + 'RequestOptionalPathParams', { - "model_bundle_id": typing.Union[ - ModelBundleIdSchema, - str, - ], }, + total=False ) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @@ -54,7 
+56,8 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): required=True, ) _auth = [ - "HTTPBasic", + 'OAuth2PasswordBearer', + 'HTTPBasic', ] SchemaFor200ResponseBodyApplicationJson = ModelBundleV2Response @@ -62,14 +65,17 @@ class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams): @dataclass class ApiResponseFor200(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor200ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_200 = api_client.OpenApiResponse( response_cls=ApiResponseFor200, content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor200ResponseBodyApplicationJson), }, ) SchemaFor422ResponseBodyApplicationJson = HTTPValidationError @@ -78,21 +84,26 @@ class ApiResponseFor200(api_client.ApiResponse): @dataclass class ApiResponseFor422(api_client.ApiResponse): response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] + body: typing.Union[ + SchemaFor422ResponseBodyApplicationJson, + ] headers: schemas.Unset = schemas.unset _response_for_422 = api_client.OpenApiResponse( response_cls=ApiResponseFor422, content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), + 'application/json': api_client.MediaType( + schema=SchemaFor422ResponseBodyApplicationJson), }, ) _status_code_to_response = { - "200": _response_for_200, - "422": _response_for_422, + '200': _response_for_200, + '422': _response_for_422, } -_all_accept_content_types = ("application/json",) +_all_accept_content_types = ( + 'application/json', +) class BaseApi(api_client.Api): @@ -104,8 +115,9 @@ def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, 
typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( @@ -115,8 +127,7 @@ def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( @@ -126,8 +137,10 @@ def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... 
def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( self, @@ -147,7 +160,9 @@ class instances used_path = path.value _path_params = {} - for parameter in (request_path_model_bundle_id,): + for parameter in ( + request_path_model_bundle_id, + ): parameter_data = path_params.get(parameter.name, schemas.unset) if parameter_data is schemas.unset: continue @@ -155,17 +170,17 @@ class instances _path_params.update(serialized_data) for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) + used_path = used_path.replace('{%s}' % k, v) _headers = HTTPHeaderDict() # TODO add cookie handling if accept_content_types: for accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) + _headers.add('Accept', accept_content_type) response = self.api_client.call_api( resource_path=used_path, - method="get".upper(), + method='get'.upper(), headers=_headers, auth_settings=_auth, stream=stream, @@ -182,7 +197,11 @@ class instances api_response = api_client.ApiResponseWithoutDeserialization(response=response) if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) + raise exceptions.ApiException( + status=response.status, + reason=response.reason, + api_response=api_response + ) return api_response @@ -198,8 +217,9 @@ def get_model_bundle_v2_model_bundles_model_bundle_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... 
@typing.overload def get_model_bundle_v2_model_bundles_model_bundle_id_get( @@ -209,8 +229,7 @@ def get_model_bundle_v2_model_bundles_model_bundle_id_get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... @typing.overload def get_model_bundle_v2_model_bundles_model_bundle_id_get( @@ -220,8 +239,10 @@ def get_model_bundle_v2_model_bundles_model_bundle_id_get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get_model_bundle_v2_model_bundles_model_bundle_id_get( self, @@ -236,7 +257,7 @@ def get_model_bundle_v2_model_bundles_model_bundle_id_get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) @@ -251,8 +272,9 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + ]: ... @typing.overload def get( @@ -262,8 +284,7 @@ def get( accept_content_types: typing.Tuple[str] = _all_accept_content_types, stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: - ... + ) -> api_client.ApiResponseWithoutDeserialization: ... 
@typing.overload def get( @@ -273,8 +294,10 @@ def get( stream: bool = False, timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: - ... + ) -> typing.Union[ + ApiResponseFor200, + api_client.ApiResponseWithoutDeserialization, + ]: ... def get( self, @@ -289,5 +312,7 @@ def get( accept_content_types=accept_content_types, stream=stream, timeout=timeout, - skip_deserialization=skip_deserialization, + skip_deserialization=skip_deserialization ) + + diff --git a/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.pyi b/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.pyi deleted file mode 100644 index fedb2333..00000000 --- a/launch/api_client/paths/v2_model_bundles_model_bundle_id/get.pyi +++ /dev/null @@ -1,254 +0,0 @@ -# coding: utf-8 - -""" - - - Generated by: https://openapi-generator.tech -""" - -import decimal # noqa: F401 -import functools # noqa: F401 -import io # noqa: F401 -import re # noqa: F401 -import typing # noqa: F401 -import uuid # noqa: F401 -from dataclasses import dataclass -from datetime import date, datetime # noqa: F401 - -import frozendict # noqa: F401 -import typing_extensions # noqa: F401 -import urllib3 -from launch_client import schemas # noqa: F401 -from launch_client import api_client, exceptions -from launch_client.model.http_validation_error import HTTPValidationError -from launch_client.model.model_bundle_v2_response import ModelBundleV2Response -from urllib3._collections import HTTPHeaderDict - -# Path params -ModelBundleIdSchema = schemas.StrSchema -RequestRequiredPathParams = typing_extensions.TypedDict( - "RequestRequiredPathParams", - { - "model_bundle_id": typing.Union[ - ModelBundleIdSchema, - str, - ], - }, -) -RequestOptionalPathParams = typing_extensions.TypedDict("RequestOptionalPathParams", {}, total=False) - -class RequestPathParams(RequestRequiredPathParams, 
RequestOptionalPathParams): - pass - -request_path_model_bundle_id = api_client.PathParameter( - name="model_bundle_id", - style=api_client.ParameterStyle.SIMPLE, - schema=ModelBundleIdSchema, - required=True, -) -SchemaFor200ResponseBodyApplicationJson = ModelBundleV2Response - -@dataclass -class ApiResponseFor200(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor200ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_200 = api_client.OpenApiResponse( - response_cls=ApiResponseFor200, - content={ - "application/json": api_client.MediaType(schema=SchemaFor200ResponseBodyApplicationJson), - }, -) -SchemaFor422ResponseBodyApplicationJson = HTTPValidationError - -@dataclass -class ApiResponseFor422(api_client.ApiResponse): - response: urllib3.HTTPResponse - body: typing.Union[SchemaFor422ResponseBodyApplicationJson,] - headers: schemas.Unset = schemas.unset - -_response_for_422 = api_client.OpenApiResponse( - response_cls=ApiResponseFor422, - content={ - "application/json": api_client.MediaType(schema=SchemaFor422ResponseBodyApplicationJson), - }, -) -_all_accept_content_types = ("application/json",) - -class BaseApi(api_client.Api): - @typing.overload - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... 
- @typing.overload - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... - @typing.overload - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def _get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - """ - Get Model Bundle - :param skip_deserialization: If true then api_response.response will be set but - api_response.body and api_response.headers will not be deserialized into schema - class instances - """ - self._verify_typed_dict_inputs_oapg(RequestPathParams, path_params) - used_path = path.value - - _path_params = {} - for parameter in (request_path_model_bundle_id,): - parameter_data = path_params.get(parameter.name, schemas.unset) - if parameter_data is schemas.unset: - continue - serialized_data = parameter.serialize(parameter_data) - _path_params.update(serialized_data) - - for k, v in _path_params.items(): - used_path = used_path.replace("{%s}" % k, v) - - _headers = HTTPHeaderDict() - # TODO add cookie handling - if accept_content_types: - for 
accept_content_type in accept_content_types: - _headers.add("Accept", accept_content_type) - - response = self.api_client.call_api( - resource_path=used_path, - method="get".upper(), - headers=_headers, - auth_settings=_auth, - stream=stream, - timeout=timeout, - ) - - if skip_deserialization: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - else: - response_for_status = _status_code_to_response.get(str(response.status)) - if response_for_status: - api_response = response_for_status.deserialize(response, self.api_client.configuration) - else: - api_response = api_client.ApiResponseWithoutDeserialization(response=response) - - if not 200 <= response.status <= 299: - raise exceptions.ApiException(status=response.status, reason=response.reason, api_response=api_response) - - return api_response - -class GetModelBundleV2ModelBundlesModelBundleIdGet(BaseApi): - # this class is used by api classes that refer to endpoints with operationId fn names - - @typing.overload - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get_model_bundle_v2_model_bundles_model_bundle_id_get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) - -class ApiForget(BaseApi): - # this class is used by api classes that refer to endpoints by path and http method names - - @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: typing_extensions.Literal[False] = ..., - ) -> typing.Union[ApiResponseFor200,]: ... - @typing.overload - def get( - self, - skip_deserialization: typing_extensions.Literal[True], - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - ) -> api_client.ApiResponseWithoutDeserialization: ... 
- @typing.overload - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = ..., - ) -> typing.Union[ApiResponseFor200, api_client.ApiResponseWithoutDeserialization,]: ... - def get( - self, - path_params: RequestPathParams = frozendict.frozendict(), - accept_content_types: typing.Tuple[str] = _all_accept_content_types, - stream: bool = False, - timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None, - skip_deserialization: bool = False, - ): - return self._get_model_bundle_v2_model_bundles_model_bundle_id_get_oapg( - path_params=path_params, - accept_content_types=accept_content_types, - stream=stream, - timeout=timeout, - skip_deserialization=skip_deserialization, - ) diff --git a/launch/api_client/rest.py b/launch/api_client/rest.py index 2065508f..b09e2a34 100644 --- a/launch/api_client/rest.py +++ b/launch/api_client/rest.py @@ -24,6 +24,7 @@ class RESTClientObject(object): + def __init__(self, configuration, pools_size=4, maxsize=None): # urllib3.PoolManager will pass all kw parameters to connectionpool # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501 @@ -46,13 +47,13 @@ def __init__(self, configuration, pools_size=4, maxsize=None): addition_pool_args = {} if configuration.assert_hostname is not None: - addition_pool_args["assert_hostname"] = configuration.assert_hostname # noqa: E501 + addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501 if configuration.retries is not None: - addition_pool_args["retries"] = configuration.retries + addition_pool_args['retries'] = configuration.retries if configuration.socket_options is not None: - addition_pool_args["socket_options"] = configuration.socket_options + addition_pool_args['socket_options'] 
= configuration.socket_options if maxsize is None: if configuration.connection_pool_maxsize is not None: @@ -71,7 +72,7 @@ def __init__(self, configuration, pools_size=4, maxsize=None): key_file=configuration.key_file, proxy_url=configuration.proxy, proxy_headers=configuration.proxy_headers, - **addition_pool_args, + **addition_pool_args ) else: self.pool_manager = urllib3.PoolManager( @@ -81,7 +82,7 @@ def __init__(self, configuration, pools_size=4, maxsize=None): ca_certs=ca_certs, cert_file=configuration.cert_file, key_file=configuration.key_file, - **addition_pool_args, + **addition_pool_args ) def request( @@ -112,10 +113,13 @@ def request( (connection, read) timeouts. """ method = method.upper() - assert method in ["GET", "HEAD", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"] + assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT', + 'PATCH', 'OPTIONS'] if fields and body: - raise ApiValueError("body parameter cannot be used with fields parameter.") + raise ApiValueError( + "body parameter cannot be used with fields parameter." 
+ ) fields = fields or {} headers = headers or {} @@ -123,49 +127,53 @@ def request( if timeout: if isinstance(timeout, (int, float)): # noqa: E501,F821 timeout = urllib3.Timeout(total=timeout) - elif isinstance(timeout, tuple) and len(timeout) == 2: + elif (isinstance(timeout, tuple) and + len(timeout) == 2): timeout = urllib3.Timeout(connect=timeout[0], read=timeout[1]) try: # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE` - if method in ["POST", "PUT", "PATCH", "OPTIONS", "DELETE"]: - if "Content-Type" not in headers and body is None: - r = self.pool_manager.request( - method, url, preload_content=not stream, timeout=timeout, headers=headers - ) - elif headers["Content-Type"] == "application/x-www-form-urlencoded": # noqa: E501 + if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']: + if 'Content-Type' not in headers and body is None: r = self.pool_manager.request( method, url, + preload_content=not stream, + timeout=timeout, + headers=headers + ) + elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501 + r = self.pool_manager.request( + method, url, body=body, fields=fields, encode_multipart=False, preload_content=not stream, timeout=timeout, - headers=headers, - ) - elif headers["Content-Type"] == "multipart/form-data": + headers=headers) + elif headers['Content-Type'] == 'multipart/form-data': # must del headers['Content-Type'], or the correct # Content-Type which generated by urllib3 will be # overwritten. 
- del headers["Content-Type"] + del headers['Content-Type'] r = self.pool_manager.request( - method, - url, + method, url, fields=fields, encode_multipart=True, preload_content=not stream, timeout=timeout, - headers=headers, - ) + headers=headers) # Pass a `string` parameter directly in the body to support # other content types than Json when `body` argument is # provided in serialized form elif isinstance(body, str) or isinstance(body, bytes): request_body = body r = self.pool_manager.request( - method, url, body=request_body, preload_content=not stream, timeout=timeout, headers=headers - ) + method, url, + body=request_body, + preload_content=not stream, + timeout=timeout, + headers=headers) else: # Cannot generate the request from given parameters msg = """Cannot prepare a request message for provided @@ -174,7 +182,10 @@ def request( raise ApiException(status=0, reason=msg) # For `GET`, `HEAD` else: - r = self.pool_manager.request(method, url, preload_content=not stream, timeout=timeout, headers=headers) + r = self.pool_manager.request(method, url, + preload_content=not stream, + timeout=timeout, + headers=headers) except urllib3.exceptions.SSLError as e: msg = "{0}\n{1}".format(type(e).__name__, str(e)) raise ApiException(status=0, reason=msg) @@ -185,23 +196,58 @@ def request( return r - def GET(self, url, headers=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("GET", url, headers=headers, stream=stream, timeout=timeout, fields=fields) - - def HEAD(self, url, headers=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("HEAD", url, headers=headers, stream=stream, timeout=timeout, fields=fields) - - def OPTIONS(self, url, headers=None, body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("OPTIONS", url, headers=headers, stream=stream, timeout=timeout, body=body, fields=fields) - - def DELETE(self, url, headers=None, body=None, 
stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("DELETE", url, headers=headers, stream=stream, timeout=timeout, body=body, fields=fields) - - def POST(self, url, headers=None, body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("POST", url, headers=headers, stream=stream, timeout=timeout, body=body, fields=fields) - - def PUT(self, url, headers=None, body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("PUT", url, headers=headers, stream=stream, timeout=timeout, body=body, fields=fields) - - def PATCH(self, url, headers=None, body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: - return self.request("PATCH", url, headers=headers, stream=stream, timeout=timeout, body=body, fields=fields) + def GET(self, url, headers=None, stream=False, + timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("GET", url, + headers=headers, + stream=stream, + timeout=timeout, + fields=fields) + + def HEAD(self, url, headers=None, stream=False, + timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("HEAD", url, + headers=headers, + stream=stream, + timeout=timeout, + fields=fields) + + def OPTIONS(self, url, headers=None, + body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("OPTIONS", url, + headers=headers, + stream=stream, + timeout=timeout, + body=body, fields=fields) + + def DELETE(self, url, headers=None, body=None, + stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("DELETE", url, + headers=headers, + stream=stream, + timeout=timeout, + body=body, fields=fields) + + def POST(self, url, headers=None, + body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("POST", url, + headers=headers, + stream=stream, + timeout=timeout, + body=body, fields=fields) + + def 
PUT(self, url, headers=None, + body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("PUT", url, + headers=headers, + stream=stream, + timeout=timeout, + body=body, fields=fields) + + def PATCH(self, url, headers=None, + body=None, stream=False, timeout=None, fields=None) -> urllib3.HTTPResponse: + return self.request("PATCH", url, + headers=headers, + stream=stream, + timeout=timeout, + body=body, fields=fields) diff --git a/launch/api_client/schemas.py b/launch/api_client/schemas.py index 3eaf4a2a..b69a624f 100644 --- a/launch/api_client/schemas.py +++ b/launch/api_client/schemas.py @@ -31,10 +31,8 @@ class Unset(object): An instance of this class is set as the default value for object type(dict) properties that are optional When a property has an unset value, that property will not be assigned in the dict """ - pass - unset = Unset() none_type = type(None) @@ -50,12 +48,12 @@ class FileIO(io.FileIO): def __new__(cls, _arg: typing.Union[io.FileIO, io.BufferedReader]): if isinstance(_arg, (io.FileIO, io.BufferedReader)): if _arg.closed: - raise ApiValueError("Invalid file state; file is closed and must be open") + raise ApiValueError('Invalid file state; file is closed and must be open') _arg.close() inst = super(FileIO, cls).__new__(cls, _arg.name) super(FileIO, inst).__init__(_arg.name) return inst - raise ApiValueError("FileIO must be passed _arg which contains the open file") + raise ApiValueError('FileIO must be passed _arg which contains the open file') def __init__(self, _arg: typing.Union[io.FileIO, io.BufferedReader]): pass @@ -79,16 +77,13 @@ class ValidationMetadata(frozendict.frozendict): """ A class storing metadata that is needed to validate OpenApi Schema payloads """ - def __new__( cls, - path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(["args[0]"]), + path_to_item: typing.Tuple[typing.Union[str, int], ...] 
= tuple(['args[0]']), from_server: bool = False, configuration: typing.Optional[Configuration] = None, seen_classes: typing.FrozenSet[typing.Type] = frozenset(), - validated_path_to_schemas: typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type] - ] = frozendict.frozendict(), + validated_path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]] = frozendict.frozendict() ): """ Args: @@ -115,7 +110,7 @@ def __new__( from_server=from_server, configuration=configuration, seen_classes=seen_classes, - validated_path_to_schemas=validated_path_to_schemas, + validated_path_to_schemas=validated_path_to_schemas ) def validation_ran_earlier(self, cls: type) -> bool: @@ -129,25 +124,23 @@ def validation_ran_earlier(self, cls: type) -> bool: @property def path_to_item(self) -> typing.Tuple[typing.Union[str, int], ...]: - return self.get("path_to_item") + return self.get('path_to_item') @property def from_server(self) -> bool: - return self.get("from_server") + return self.get('from_server') @property def configuration(self) -> typing.Optional[Configuration]: - return self.get("configuration") + return self.get('configuration') @property def seen_classes(self) -> typing.FrozenSet[typing.Type]: - return self.get("seen_classes") + return self.get('seen_classes') @property - def validated_path_to_schemas( - self, - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]]: - return self.get("validated_path_to_schemas") + def validated_path_to_schemas(self) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Type]]: + return self.get('validated_path_to_schemas') def add_deeper_validated_schemas(validation_metadata: ValidationMetadata, path_to_schemas: dict): @@ -157,7 +150,7 @@ def add_deeper_validated_schemas(validation_metadata: ValidationMetadata, path_t for path_to_item, schemas in validation_metadata.validated_path_to_schemas.items(): if len(path_to_item) < 
len(current_path_to_item): continue - path_begins_with_current_path = path_to_item[: len(current_path_to_item)] == current_path_to_item + path_begins_with_current_path = path_to_item[:len(current_path_to_item)] == current_path_to_item if path_begins_with_current_path: other_path_to_schemas[path_to_item] = schemas update(path_to_schemas, other_path_to_schemas) @@ -168,7 +161,6 @@ class Singleton: Enums and singletons are the same The same instance is returned for a given key of (cls, _arg) """ - _instances = {} def __new__(cls, _arg: typing.Any, **kwargs): @@ -192,15 +184,16 @@ def __new__(cls, _arg: typing.Any, **kwargs): def __repr__(self): if isinstance(self, NoneClass): - return f"<{self.__class__.__name__}: None>" + return f'<{self.__class__.__name__}: None>' elif isinstance(self, BoolClass): if bool(self): - return f"<{self.__class__.__name__}: True>" - return f"<{self.__class__.__name__}: False>" - return f"<{self.__class__.__name__}: {super().__repr__()}>" + return f'<{self.__class__.__name__}: True>' + return f'<{self.__class__.__name__}: False>' + return f'<{self.__class__.__name__}: {super().__repr__()}>' class classproperty: + def __init__(self, fget): self.fget = fget @@ -231,7 +224,7 @@ def __bool__(self) -> bool: for key, instance in self._instances.items(): if self is instance: return bool(key[1]) - raise ValueError("Unable to find the boolean value of this instance") + raise ValueError('Unable to find the boolean value of this instance') class MetaOapgTyped: @@ -241,39 +234,30 @@ class MetaOapgTyped: inclusive_minimum: typing.Union[int, float] max_items: int min_items: int - discriminator: typing.Dict[str, typing.Dict[str, typing.Type["Schema"]]] + discriminator: typing.Dict[str, typing.Dict[str, typing.Type['Schema']]] + class properties: # to hold object properties pass - additional_properties: typing.Optional[typing.Type["Schema"]] + additional_properties: typing.Optional[typing.Type['Schema']] max_properties: int min_properties: int - all_of: 
typing.List[typing.Type["Schema"]] - one_of: typing.List[typing.Type["Schema"]] - any_of: typing.List[typing.Type["Schema"]] - not_schema: typing.Type["Schema"] + all_of: typing.List[typing.Type['Schema']] + one_of: typing.List[typing.Type['Schema']] + any_of: typing.List[typing.Type['Schema']] + not_schema: typing.Type['Schema'] max_length: int min_length: int - items: typing.Type["Schema"] + items: typing.Type['Schema'] class Schema: """ the base class of all swagger/openapi schemas/models """ - - __inheritable_primitive_types_set = { - decimal.Decimal, - str, - tuple, - frozendict.frozendict, - FileIO, - bytes, - BoolClass, - NoneClass, - } + __inheritable_primitive_types_set = {decimal.Decimal, str, tuple, frozendict.frozendict, FileIO, bytes, BoolClass, NoneClass} _types: typing.Set[typing.Type] MetaOapg = MetaOapgTyped @@ -288,9 +272,7 @@ def __get_valid_classes_phrase(input_classes): return "is one of [{0}]".format(", ".join(all_class_names)) @staticmethod - def _get_class_oapg( - item_cls: typing.Union[types.FunctionType, staticmethod, typing.Type["Schema"]] - ) -> typing.Type["Schema"]: + def _get_class_oapg(item_cls: typing.Union[types.FunctionType, staticmethod, typing.Type['Schema']]) -> typing.Type['Schema']: if isinstance(item_cls, types.FunctionType): # referenced schema return item_cls() @@ -300,7 +282,9 @@ def _get_class_oapg( return item_cls @classmethod - def __type_error_message(cls, var_value=None, var_name=None, valid_classes=None, key_type=None): + def __type_error_message( + cls, var_value=None, var_name=None, valid_classes=None, key_type=None + ): """ Keyword Args: var_value (any): the variable which has the type_error @@ -343,10 +327,7 @@ def _validate_oapg( cls, arg, validation_metadata: ValidationMetadata, - ) -> typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], - typing.Set[typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]], - ]: + ) -> typing.Dict[typing.Tuple[typing.Union[str, 
int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: """ Schema _validate_oapg All keyword validation except for type checking was done in calling stack frames @@ -375,9 +356,7 @@ def _validate_oapg( @staticmethod def _process_schema_classes_oapg( - schema_classes: typing.Set[ - typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple] - ] + schema_classes: typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]] ): """ Processes and mutates schema_classes @@ -397,8 +376,10 @@ def _process_schema_classes_oapg( @classmethod def __get_new_cls( - cls, arg, validation_metadata: ValidationMetadata - ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]]: + cls, + arg, + validation_metadata: ValidationMetadata + ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']]: """ Make a new dynamic class and return an instance of that class We are making an instance of cls, but instead of making cls @@ -437,7 +418,8 @@ def __get_new_cls( 3. 
N number of schema classes, classes in path_to_schemas: BoolClass/NoneClass/tuple/frozendict.frozendict/str/Decimal/bytes/FileIo """ cls._process_schema_classes_oapg(schema_classes) - enum_schema = any(issubclass(this_cls, EnumBase) for this_cls in schema_classes) + enum_schema = any( + issubclass(this_cls, EnumBase) for this_cls in schema_classes) inheritable_primitive_type = schema_classes.intersection(cls.__inheritable_primitive_types_set) chosen_schema_classes = schema_classes - inheritable_primitive_type suffix = tuple(inheritable_primitive_type) @@ -445,7 +427,7 @@ def __get_new_cls( suffix = (Singleton,) + suffix used_classes = tuple(sorted(chosen_schema_classes, key=lambda a_cls: a_cls.__name__)) + suffix - mfg_cls = get_new_class(class_name="DynamicSchema", bases=used_classes) + mfg_cls = get_new_class(class_name='DynamicSchema', bases=used_classes) path_to_schemas[path] = mfg_cls return path_to_schemas @@ -455,7 +437,7 @@ def _get_new_instance_without_conversion_oapg( cls, arg: typing.Any, path_to_item: typing.Tuple[typing.Union[str, int], ...], - path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]], + path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']] ): # We have a Dynamic class and we are making an instance of it if issubclass(cls, frozendict.frozendict) and issubclass(cls, DictBase): @@ -484,16 +466,16 @@ def from_openapi_data_oapg( decimal.Decimal, bool, None, - "Schema", + 'Schema', dict, frozendict.frozendict, tuple, list, io.FileIO, io.BufferedReader, - bytes, + bytes ], - _configuration: typing.Optional[Configuration], + _configuration: typing.Optional[Configuration] ): """ Schema from_openapi_data_oapg @@ -502,12 +484,13 @@ def from_openapi_data_oapg( validated_path_to_schemas = {} arg = cast_to_allowed_types(arg, from_server, validated_path_to_schemas) validation_metadata = ValidationMetadata( - from_server=from_server, configuration=_configuration, 
validated_path_to_schemas=validated_path_to_schemas - ) + from_server=from_server, configuration=_configuration, validated_path_to_schemas=validated_path_to_schemas) path_to_schemas = cls.__get_new_cls(arg, validation_metadata) new_cls = path_to_schemas[validation_metadata.path_to_item] new_inst = new_cls._get_new_instance_without_conversion_oapg( - arg, validation_metadata.path_to_item, path_to_schemas + arg, + validation_metadata.path_to_item, + path_to_schemas ) return new_inst @@ -524,41 +507,7 @@ def __get_input_dict(*args, **kwargs) -> frozendict.frozendict: def __remove_unsets(kwargs): return {key: val for key, val in kwargs.items() if val is not unset} - def __new__( - cls, - *_args: typing.Union[ - dict, - frozendict.frozendict, - list, - tuple, - decimal.Decimal, - float, - int, - str, - date, - datetime, - bool, - None, - "Schema", - ], - _configuration: typing.Optional[Configuration] = None, - **kwargs: typing.Union[ - dict, - frozendict.frozendict, - list, - tuple, - decimal.Decimal, - float, - int, - str, - date, - datetime, - bool, - None, - "Schema", - Unset, - ], - ): + def __new__(cls, *_args: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema'], _configuration: typing.Optional[Configuration] = None, **kwargs: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema', Unset]): """ Schema __new__ @@ -573,59 +522,35 @@ def __new__( """ __kwargs = cls.__remove_unsets(kwargs) if not _args and not __kwargs: - raise TypeError("No input given. args or kwargs must be given.") + raise TypeError( + 'No input given. args or kwargs must be given.' 
+ ) if not __kwargs and _args and not isinstance(_args[0], dict): __arg = _args[0] else: __arg = cls.__get_input_dict(*_args, **__kwargs) __from_server = False __validated_path_to_schemas = {} - __arg = cast_to_allowed_types(__arg, __from_server, __validated_path_to_schemas) + __arg = cast_to_allowed_types( + __arg, __from_server, __validated_path_to_schemas) __validation_metadata = ValidationMetadata( - configuration=_configuration, - from_server=__from_server, - validated_path_to_schemas=__validated_path_to_schemas, - ) + configuration=_configuration, from_server=__from_server, validated_path_to_schemas=__validated_path_to_schemas) __path_to_schemas = cls.__get_new_cls(__arg, __validation_metadata) __new_cls = __path_to_schemas[__validation_metadata.path_to_item] return __new_cls._get_new_instance_without_conversion_oapg( - __arg, __validation_metadata.path_to_item, __path_to_schemas + __arg, + __validation_metadata.path_to_item, + __path_to_schemas ) def __init__( self, *_args: typing.Union[ - dict, - frozendict.frozendict, - list, - tuple, - decimal.Decimal, - float, - int, - str, - date, - datetime, - bool, - None, - "Schema", - ], + dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema'], _configuration: typing.Optional[Configuration] = None, **kwargs: typing.Union[ - dict, - frozendict.frozendict, - list, - tuple, - decimal.Decimal, - float, - int, - str, - date, - datetime, - bool, - None, - "Schema", - Unset, - ], + dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, 'Schema', Unset + ] ): """ this is needed to fix 'Unexpected argument' warning in pycharm @@ -635,7 +560,6 @@ def __init__( """ pass - """ import itertools data_types = ('None', 'FrozenDict', 'Tuple', 'Str', 'Decimal', 'Bool') @@ -665,399 +589,269 @@ def __init__( BoolMixin = BoolClass BytesMixin = bytes FileMixin = FileIO - # qty 2 class BinaryMixin(bytes, FileIO): pass - class 
NoneFrozenDictMixin(NoneClass, frozendict.frozendict): pass - class NoneTupleMixin(NoneClass, tuple): pass - class NoneStrMixin(NoneClass, str): pass - class NoneDecimalMixin(NoneClass, decimal.Decimal): pass - class NoneBoolMixin(NoneClass, BoolClass): pass - class FrozenDictTupleMixin(frozendict.frozendict, tuple): pass - class FrozenDictStrMixin(frozendict.frozendict, str): pass - class FrozenDictDecimalMixin(frozendict.frozendict, decimal.Decimal): pass - class FrozenDictBoolMixin(frozendict.frozendict, BoolClass): pass - class TupleStrMixin(tuple, str): pass - class TupleDecimalMixin(tuple, decimal.Decimal): pass - class TupleBoolMixin(tuple, BoolClass): pass - class StrDecimalMixin(str, decimal.Decimal): pass - class StrBoolMixin(str, BoolClass): pass - class DecimalBoolMixin(decimal.Decimal, BoolClass): pass - # qty 3 class NoneFrozenDictTupleMixin(NoneClass, frozendict.frozendict, tuple): pass - class NoneFrozenDictStrMixin(NoneClass, frozendict.frozendict, str): pass - class NoneFrozenDictDecimalMixin(NoneClass, frozendict.frozendict, decimal.Decimal): pass - class NoneFrozenDictBoolMixin(NoneClass, frozendict.frozendict, BoolClass): pass - class NoneTupleStrMixin(NoneClass, tuple, str): pass - class NoneTupleDecimalMixin(NoneClass, tuple, decimal.Decimal): pass - class NoneTupleBoolMixin(NoneClass, tuple, BoolClass): pass - class NoneStrDecimalMixin(NoneClass, str, decimal.Decimal): pass - class NoneStrBoolMixin(NoneClass, str, BoolClass): pass - class NoneDecimalBoolMixin(NoneClass, decimal.Decimal, BoolClass): pass - class FrozenDictTupleStrMixin(frozendict.frozendict, tuple, str): pass - class FrozenDictTupleDecimalMixin(frozendict.frozendict, tuple, decimal.Decimal): pass - class FrozenDictTupleBoolMixin(frozendict.frozendict, tuple, BoolClass): pass - class FrozenDictStrDecimalMixin(frozendict.frozendict, str, decimal.Decimal): pass - class FrozenDictStrBoolMixin(frozendict.frozendict, str, BoolClass): pass - class 
FrozenDictDecimalBoolMixin(frozendict.frozendict, decimal.Decimal, BoolClass): pass - class TupleStrDecimalMixin(tuple, str, decimal.Decimal): pass - class TupleStrBoolMixin(tuple, str, BoolClass): pass - class TupleDecimalBoolMixin(tuple, decimal.Decimal, BoolClass): pass - class StrDecimalBoolMixin(str, decimal.Decimal, BoolClass): pass - # qty 4 class NoneFrozenDictTupleStrMixin(NoneClass, frozendict.frozendict, tuple, str): pass - class NoneFrozenDictTupleDecimalMixin(NoneClass, frozendict.frozendict, tuple, decimal.Decimal): pass - class NoneFrozenDictTupleBoolMixin(NoneClass, frozendict.frozendict, tuple, BoolClass): pass - class NoneFrozenDictStrDecimalMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal): pass - class NoneFrozenDictStrBoolMixin(NoneClass, frozendict.frozendict, str, BoolClass): pass - class NoneFrozenDictDecimalBoolMixin(NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass): pass - class NoneTupleStrDecimalMixin(NoneClass, tuple, str, decimal.Decimal): pass - class NoneTupleStrBoolMixin(NoneClass, tuple, str, BoolClass): pass - class NoneTupleDecimalBoolMixin(NoneClass, tuple, decimal.Decimal, BoolClass): pass - class NoneStrDecimalBoolMixin(NoneClass, str, decimal.Decimal, BoolClass): pass - class FrozenDictTupleStrDecimalMixin(frozendict.frozendict, tuple, str, decimal.Decimal): pass - class FrozenDictTupleStrBoolMixin(frozendict.frozendict, tuple, str, BoolClass): pass - class FrozenDictTupleDecimalBoolMixin(frozendict.frozendict, tuple, decimal.Decimal, BoolClass): pass - class FrozenDictStrDecimalBoolMixin(frozendict.frozendict, str, decimal.Decimal, BoolClass): pass - class TupleStrDecimalBoolMixin(tuple, str, decimal.Decimal, BoolClass): pass - # qty 5 class NoneFrozenDictTupleStrDecimalMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal): pass - class NoneFrozenDictTupleStrBoolMixin(NoneClass, frozendict.frozendict, tuple, str, BoolClass): pass - class NoneFrozenDictTupleDecimalBoolMixin(NoneClass, 
frozendict.frozendict, tuple, decimal.Decimal, BoolClass): pass - class NoneFrozenDictStrDecimalBoolMixin(NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass): pass - class NoneTupleStrDecimalBoolMixin(NoneClass, tuple, str, decimal.Decimal, BoolClass): pass - class FrozenDictTupleStrDecimalBoolMixin(frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass): pass - # qty 6 - class NoneFrozenDictTupleStrDecimalBoolMixin( - NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass - ): + class NoneFrozenDictTupleStrDecimalBoolMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass): pass - # qty 8 - class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin( - NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes - ): + class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin(NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes): pass - else: # qty 1 class NoneMixin: _types = {NoneClass} - class FrozenDictMixin: _types = {frozendict.frozendict} - class TupleMixin: _types = {tuple} - class StrMixin: _types = {str} - class DecimalMixin: _types = {decimal.Decimal} - class BoolMixin: _types = {BoolClass} - class BytesMixin: _types = {bytes} - class FileMixin: _types = {FileIO} - # qty 2 class BinaryMixin: _types = {bytes, FileIO} - class NoneFrozenDictMixin: _types = {NoneClass, frozendict.frozendict} - class NoneTupleMixin: _types = {NoneClass, tuple} - class NoneStrMixin: _types = {NoneClass, str} - class NoneDecimalMixin: _types = {NoneClass, decimal.Decimal} - class NoneBoolMixin: _types = {NoneClass, BoolClass} - class FrozenDictTupleMixin: _types = {frozendict.frozendict, tuple} - class FrozenDictStrMixin: _types = {frozendict.frozendict, str} - class FrozenDictDecimalMixin: _types = {frozendict.frozendict, decimal.Decimal} - class FrozenDictBoolMixin: _types = {frozendict.frozendict, BoolClass} - class TupleStrMixin: _types = {tuple, str} - class 
TupleDecimalMixin: _types = {tuple, decimal.Decimal} - class TupleBoolMixin: _types = {tuple, BoolClass} - class StrDecimalMixin: _types = {str, decimal.Decimal} - class StrBoolMixin: _types = {str, BoolClass} - class DecimalBoolMixin: _types = {decimal.Decimal, BoolClass} - # qty 3 class NoneFrozenDictTupleMixin: _types = {NoneClass, frozendict.frozendict, tuple} - class NoneFrozenDictStrMixin: _types = {NoneClass, frozendict.frozendict, str} - class NoneFrozenDictDecimalMixin: _types = {NoneClass, frozendict.frozendict, decimal.Decimal} - class NoneFrozenDictBoolMixin: _types = {NoneClass, frozendict.frozendict, BoolClass} - class NoneTupleStrMixin: _types = {NoneClass, tuple, str} - class NoneTupleDecimalMixin: _types = {NoneClass, tuple, decimal.Decimal} - class NoneTupleBoolMixin: _types = {NoneClass, tuple, BoolClass} - class NoneStrDecimalMixin: _types = {NoneClass, str, decimal.Decimal} - class NoneStrBoolMixin: _types = {NoneClass, str, BoolClass} - class NoneDecimalBoolMixin: _types = {NoneClass, decimal.Decimal, BoolClass} - class FrozenDictTupleStrMixin: _types = {frozendict.frozendict, tuple, str} - class FrozenDictTupleDecimalMixin: _types = {frozendict.frozendict, tuple, decimal.Decimal} - class FrozenDictTupleBoolMixin: _types = {frozendict.frozendict, tuple, BoolClass} - class FrozenDictStrDecimalMixin: _types = {frozendict.frozendict, str, decimal.Decimal} - class FrozenDictStrBoolMixin: _types = {frozendict.frozendict, str, BoolClass} - class FrozenDictDecimalBoolMixin: _types = {frozendict.frozendict, decimal.Decimal, BoolClass} - class TupleStrDecimalMixin: _types = {tuple, str, decimal.Decimal} - class TupleStrBoolMixin: _types = {tuple, str, BoolClass} - class TupleDecimalBoolMixin: _types = {tuple, decimal.Decimal, BoolClass} - class StrDecimalBoolMixin: _types = {str, decimal.Decimal, BoolClass} - # qty 4 class NoneFrozenDictTupleStrMixin: _types = {NoneClass, frozendict.frozendict, tuple, str} - class NoneFrozenDictTupleDecimalMixin: 
_types = {NoneClass, frozendict.frozendict, tuple, decimal.Decimal} - class NoneFrozenDictTupleBoolMixin: _types = {NoneClass, frozendict.frozendict, tuple, BoolClass} - class NoneFrozenDictStrDecimalMixin: _types = {NoneClass, frozendict.frozendict, str, decimal.Decimal} - class NoneFrozenDictStrBoolMixin: _types = {NoneClass, frozendict.frozendict, str, BoolClass} - class NoneFrozenDictDecimalBoolMixin: _types = {NoneClass, frozendict.frozendict, decimal.Decimal, BoolClass} - class NoneTupleStrDecimalMixin: _types = {NoneClass, tuple, str, decimal.Decimal} - class NoneTupleStrBoolMixin: _types = {NoneClass, tuple, str, BoolClass} - class NoneTupleDecimalBoolMixin: _types = {NoneClass, tuple, decimal.Decimal, BoolClass} - class NoneStrDecimalBoolMixin: _types = {NoneClass, str, decimal.Decimal, BoolClass} - class FrozenDictTupleStrDecimalMixin: _types = {frozendict.frozendict, tuple, str, decimal.Decimal} - class FrozenDictTupleStrBoolMixin: _types = {frozendict.frozendict, tuple, str, BoolClass} - class FrozenDictTupleDecimalBoolMixin: _types = {frozendict.frozendict, tuple, decimal.Decimal, BoolClass} - class FrozenDictStrDecimalBoolMixin: _types = {frozendict.frozendict, str, decimal.Decimal, BoolClass} - class TupleStrDecimalBoolMixin: _types = {tuple, str, decimal.Decimal, BoolClass} - # qty 5 class NoneFrozenDictTupleStrDecimalMixin: _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal} - class NoneFrozenDictTupleStrBoolMixin: _types = {NoneClass, frozendict.frozendict, tuple, str, BoolClass} - class NoneFrozenDictTupleDecimalBoolMixin: _types = {NoneClass, frozendict.frozendict, tuple, decimal.Decimal, BoolClass} - class NoneFrozenDictStrDecimalBoolMixin: _types = {NoneClass, frozendict.frozendict, str, decimal.Decimal, BoolClass} - class NoneTupleStrDecimalBoolMixin: _types = {NoneClass, tuple, str, decimal.Decimal, BoolClass} - class FrozenDictTupleStrDecimalBoolMixin: _types = {frozendict.frozendict, tuple, str, decimal.Decimal, 
BoolClass} - # qty 6 class NoneFrozenDictTupleStrDecimalBoolMixin: _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass} - # qty 8 class NoneFrozenDictTupleStrDecimalBoolFileBytesMixin: _types = {NoneClass, frozendict.frozendict, tuple, str, decimal.Decimal, BoolClass, FileIO, bytes} @@ -1077,11 +871,9 @@ def _is_json_validation_enabled_oapg(schema_keyword, configuration=None): configuration (Configuration): the configuration class. """ - return ( - configuration is None - or not hasattr(configuration, "_disabled_client_side_validations") - or schema_keyword not in configuration._disabled_client_side_validations - ) + return (configuration is None or + not hasattr(configuration, '_disabled_client_side_validations') or + schema_keyword not in configuration._disabled_client_side_validations) @staticmethod def _raise_validation_error_message_oapg(value, constraint_msg, constraint_value, path_to_item, additional_txt=""): @@ -1102,10 +894,7 @@ def _validate_oapg( cls, arg, validation_metadata: ValidationMetadata, - ) -> typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], - typing.Set[typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]], - ]: + ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: """ EnumBase _validate_oapg Validates that arg is in the enum's allowed values @@ -1113,11 +902,7 @@ def _validate_oapg( try: cls.MetaOapg.enum_value_to_name[arg] except KeyError: - raise ApiValueError( - "Invalid value {} passed in to {}, allowed_values={}".format( - arg, cls, cls.MetaOapg.enum_value_to_name.keys() - ) - ) + raise ApiValueError("Invalid value {} passed in to {}, allowed_values={}".format(arg, cls, cls.MetaOapg.enum_value_to_name.keys())) return super()._validate_oapg(arg, validation_metadata=validation_metadata) @@ -1161,69 +946,68 @@ def as_str_oapg(self) -> str: 
@property def as_date_oapg(self) -> date: - raise Exception("not implemented") + raise Exception('not implemented') @property def as_datetime_oapg(self) -> datetime: - raise Exception("not implemented") + raise Exception('not implemented') @property def as_decimal_oapg(self) -> decimal.Decimal: - raise Exception("not implemented") + raise Exception('not implemented') @property def as_uuid_oapg(self) -> uuid.UUID: - raise Exception("not implemented") + raise Exception('not implemented') @classmethod - def __check_str_validations(cls, arg: str, validation_metadata: ValidationMetadata): - if not hasattr(cls, "MetaOapg"): + def __check_str_validations( + cls, + arg: str, + validation_metadata: ValidationMetadata + ): + if not hasattr(cls, 'MetaOapg'): return - if ( - cls._is_json_validation_enabled_oapg("maxLength", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "max_length") - and len(arg) > cls.MetaOapg.max_length - ): + if (cls._is_json_validation_enabled_oapg('maxLength', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'max_length') and + len(arg) > cls.MetaOapg.max_length): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="length must be less than or equal to", constraint_value=cls.MetaOapg.max_length, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("minLength", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "min_length") - and len(arg) < cls.MetaOapg.min_length - ): + if (cls._is_json_validation_enabled_oapg('minLength', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'min_length') and + len(arg) < cls.MetaOapg.min_length): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="length must be greater than or equal to", constraint_value=cls.MetaOapg.min_length, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if 
cls._is_json_validation_enabled_oapg("pattern", validation_metadata.configuration) and hasattr( - cls.MetaOapg, "regex" - ): + if (cls._is_json_validation_enabled_oapg('pattern', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'regex')): for regex_dict in cls.MetaOapg.regex: - flags = regex_dict.get("flags", 0) - if not re.search(regex_dict["pattern"], arg, flags=flags): + flags = regex_dict.get('flags', 0) + if not re.search(regex_dict['pattern'], arg, flags=flags): if flags != 0: # Don't print the regex flags if the flags are not # specified in the OAS document. cls._raise_validation_error_message_oapg( value=arg, constraint_msg="must match regular expression", - constraint_value=regex_dict["pattern"], + constraint_value=regex_dict['pattern'], path_to_item=validation_metadata.path_to_item, - additional_txt=" with flags=`{}`".format(flags), + additional_txt=" with flags=`{}`".format(flags) ) cls._raise_validation_error_message_oapg( value=arg, constraint_msg="must match regular expression", - constraint_value=regex_dict["pattern"], - path_to_item=validation_metadata.path_to_item, + constraint_value=regex_dict['pattern'], + path_to_item=validation_metadata.path_to_item ) @classmethod @@ -1231,10 +1015,7 @@ def _validate_oapg( cls, arg, validation_metadata: ValidationMetadata, - ) -> typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], - typing.Set[typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]], - ]: + ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: """ StrBase _validate_oapg Validates that validations pass @@ -1275,21 +1056,22 @@ def _validate_oapg( class CustomIsoparser(isoparser): + @_takes_ascii def parse_isodatetime(self, dt_str): components, pos = self._parse_isodate(dt_str) if len(dt_str) > pos: - if self._sep is None or dt_str[pos : pos + 1] == self._sep: - components 
+= self._parse_isotime(dt_str[pos + 1 :]) + if self._sep is None or dt_str[pos:pos + 1] == self._sep: + components += self._parse_isotime(dt_str[pos + 1:]) else: - raise ValueError("String contains unknown ISO components") + raise ValueError('String contains unknown ISO components') if len(components) > 3 and components[3] == 24: components[3] = 0 return datetime(*components) + timedelta(days=1) if len(components) <= 3: - raise ValueError("Value is not a datetime") + raise ValueError('Value is not a datetime') return datetime(*components) @@ -1298,10 +1080,10 @@ def parse_isodate(self, datestr): components, pos = self._parse_isodate(datestr) if len(datestr) > pos: - raise ValueError("String contains invalid time components") + raise ValueError('String contains invalid time components') if len(components) > 3: - raise ValueError("String contains invalid time components") + raise ValueError('String contains invalid time components') return date(*components) @@ -1427,7 +1209,7 @@ def as_int_oapg(self) -> int: if self.as_tuple().exponent < 0: # this could be represented as an integer but should be represented as a float # because that's what it was serialized from - raise ApiValueError(f"{self} is not an integer") + raise ApiValueError(f'{self} is not an integer') self._as_int = int(self) return self._as_int @@ -1437,85 +1219,79 @@ def as_float_oapg(self) -> float: return self._as_float except AttributeError: if self.as_tuple().exponent >= 0: - raise ApiValueError(f"{self} is not a float") + raise ApiValueError(f'{self} is not a float') self._as_float = float(self) return self._as_float @classmethod - def __check_numeric_validations(cls, arg, validation_metadata: ValidationMetadata): - if not hasattr(cls, "MetaOapg"): + def __check_numeric_validations( + cls, + arg, + validation_metadata: ValidationMetadata + ): + if not hasattr(cls, 'MetaOapg'): return - if cls._is_json_validation_enabled_oapg("multipleOf", validation_metadata.configuration) and hasattr( - 
cls.MetaOapg, "multiple_of" - ): + if cls._is_json_validation_enabled_oapg('multipleOf', + validation_metadata.configuration) and hasattr(cls.MetaOapg, 'multiple_of'): multiple_of_value = cls.MetaOapg.multiple_of - if not (float(arg) / multiple_of_value).is_integer(): + if (not (float(arg) / multiple_of_value).is_integer()): # Note 'multipleOf' will be as good as the floating point arithmetic. cls._raise_validation_error_message_oapg( value=arg, constraint_msg="value must be a multiple of", constraint_value=multiple_of_value, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) checking_max_or_min_values = any( - hasattr(cls.MetaOapg, validation_key) - for validation_key in { - "exclusive_maximum", - "inclusive_maximum", - "exclusive_minimum", - "inclusive_minimum", + hasattr(cls.MetaOapg, validation_key) for validation_key in { + 'exclusive_maximum', + 'inclusive_maximum', + 'exclusive_minimum', + 'inclusive_minimum', } ) if not checking_max_or_min_values: return - if ( - cls._is_json_validation_enabled_oapg("exclusiveMaximum", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "exclusive_maximum") - and arg >= cls.MetaOapg.exclusive_maximum - ): + if (cls._is_json_validation_enabled_oapg('exclusiveMaximum', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'exclusive_maximum') and + arg >= cls.MetaOapg.exclusive_maximum): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="must be a value less than", constraint_value=cls.MetaOapg.exclusive_maximum, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("maximum", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "inclusive_maximum") - and arg > cls.MetaOapg.inclusive_maximum - ): + if (cls._is_json_validation_enabled_oapg('maximum', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'inclusive_maximum') and + arg 
> cls.MetaOapg.inclusive_maximum): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="must be a value less than or equal to", constraint_value=cls.MetaOapg.inclusive_maximum, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("exclusiveMinimum", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "exclusive_minimum") - and arg <= cls.MetaOapg.exclusive_minimum - ): + if (cls._is_json_validation_enabled_oapg('exclusiveMinimum', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'exclusive_minimum') and + arg <= cls.MetaOapg.exclusive_minimum): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="must be a value greater than", constraint_value=cls.MetaOapg.exclusive_maximum, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("minimum", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "inclusive_minimum") - and arg < cls.MetaOapg.inclusive_minimum - ): + if (cls._is_json_validation_enabled_oapg('minimum', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'inclusive_minimum') and + arg < cls.MetaOapg.inclusive_minimum): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="must be a value greater than or equal to", constraint_value=cls.MetaOapg.inclusive_minimum, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) @classmethod @@ -1523,10 +1299,7 @@ def _validate_oapg( cls, arg, validation_metadata: ValidationMetadata, - ) -> typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], - typing.Set[typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]], - ]: + ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, 
frozendict.frozendict, tuple]]]: """ NumberBase _validate_oapg Validates that validations pass @@ -1556,64 +1329,59 @@ def __validate_items(cls, list_items, validation_metadata: ValidationMetadata): # if we have definitions for an items schema, use it # otherwise accept anything - item_cls = getattr(cls.MetaOapg, "items", UnsetAnyTypeSchema) + item_cls = getattr(cls.MetaOapg, 'items', UnsetAnyTypeSchema) item_cls = cls._get_class_oapg(item_cls) path_to_schemas = {} for i, value in enumerate(list_items): item_validation_metadata = ValidationMetadata( from_server=validation_metadata.from_server, configuration=validation_metadata.configuration, - path_to_item=validation_metadata.path_to_item + (i,), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas, + path_to_item=validation_metadata.path_to_item+(i,), + validated_path_to_schemas=validation_metadata.validated_path_to_schemas ) if item_validation_metadata.validation_ran_earlier(item_cls): add_deeper_validated_schemas(item_validation_metadata, path_to_schemas) continue - other_path_to_schemas = item_cls._validate_oapg(value, validation_metadata=item_validation_metadata) + other_path_to_schemas = item_cls._validate_oapg( + value, validation_metadata=item_validation_metadata) update(path_to_schemas, other_path_to_schemas) return path_to_schemas @classmethod - def __check_tuple_validations(cls, arg, validation_metadata: ValidationMetadata): - if not hasattr(cls, "MetaOapg"): + def __check_tuple_validations( + cls, arg, + validation_metadata: ValidationMetadata): + if not hasattr(cls, 'MetaOapg'): return - if ( - cls._is_json_validation_enabled_oapg("maxItems", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "max_items") - and len(arg) > cls.MetaOapg.max_items - ): + if (cls._is_json_validation_enabled_oapg('maxItems', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'max_items') and + len(arg) > cls.MetaOapg.max_items): cls._raise_validation_error_message_oapg( 
value=arg, constraint_msg="number of items must be less than or equal to", constraint_value=cls.MetaOapg.max_items, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("minItems", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "min_items") - and len(arg) < cls.MetaOapg.min_items - ): + if (cls._is_json_validation_enabled_oapg('minItems', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'min_items') and + len(arg) < cls.MetaOapg.min_items): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="number of items must be greater than or equal to", constraint_value=cls.MetaOapg.min_items, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("uniqueItems", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "unique_items") - and cls.MetaOapg.unique_items - and arg - ): + if (cls._is_json_validation_enabled_oapg('uniqueItems', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'unique_items') and cls.MetaOapg.unique_items and arg): unique_items = set(arg) if len(arg) > len(unique_items): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="duplicate items were found, and the tuple must not contain duplicates because", - constraint_value="unique_items==True", - path_to_item=validation_metadata.path_to_item, + constraint_value='unique_items==True', + path_to_item=validation_metadata.path_to_item ) @classmethod @@ -1647,7 +1415,7 @@ def _validate_oapg( from_server=validation_metadata.from_server, path_to_item=validation_metadata.path_to_item, seen_classes=validation_metadata.seen_classes | frozenset({cls}), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas, + validated_path_to_schemas=validation_metadata.validated_path_to_schemas ) other_path_to_schemas = cls.__validate_items(arg, 
validation_metadata=updated_vm) update(_path_to_schemas, other_path_to_schemas) @@ -1655,20 +1423,24 @@ def _validate_oapg( @classmethod def _get_items_oapg( - cls: "Schema", + cls: 'Schema', arg: typing.List[typing.Any], path_to_item: typing.Tuple[typing.Union[str, int], ...], - path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]], + path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']] ): - """ + ''' ListBase _get_items_oapg - """ + ''' cast_items = [] for i, value in enumerate(arg): item_path_to_item = path_to_item + (i,) item_cls = path_to_schemas[item_path_to_item] - new_value = item_cls._get_new_instance_without_conversion_oapg(value, item_path_to_item, path_to_schemas) + new_value = item_cls._get_new_instance_without_conversion_oapg( + value, + item_path_to_item, + path_to_schemas + ) cast_items.append(new_value) return cast_items @@ -1678,16 +1450,12 @@ class Discriminable: MetaOapg: MetaOapgTyped @classmethod - def _ensure_discriminator_value_present_oapg( - cls, disc_property_name: str, validation_metadata: ValidationMetadata, *args - ): + def _ensure_discriminator_value_present_oapg(cls, disc_property_name: str, validation_metadata: ValidationMetadata, *args): if not args or args and disc_property_name not in args[0]: # The input data does not contain the discriminator property raise ApiValueError( "Cannot deserialize input data due to missing discriminator. 
" - "The discriminator property '{}' is missing at path: {}".format( - disc_property_name, validation_metadata.path_to_item - ) + "The discriminator property '{}' is missing at path: {}".format(disc_property_name, validation_metadata.path_to_item) ) @classmethod @@ -1695,7 +1463,7 @@ def get_discriminated_class_oapg(cls, disc_property_name: str, disc_payload_valu """ Used in schemas with discriminators """ - if not hasattr(cls.MetaOapg, "discriminator"): + if not hasattr(cls.MetaOapg, 'discriminator'): return None disc = cls.MetaOapg.discriminator() if disc_property_name not in disc: @@ -1703,38 +1471,38 @@ def get_discriminated_class_oapg(cls, disc_property_name: str, disc_payload_valu discriminated_cls = disc[disc_property_name].get(disc_payload_value) if discriminated_cls is not None: return discriminated_cls - if not hasattr(cls, "MetaOapg"): + if not hasattr(cls, 'MetaOapg'): return None elif not ( - hasattr(cls.MetaOapg, "all_of") or hasattr(cls.MetaOapg, "one_of") or hasattr(cls.MetaOapg, "any_of") + hasattr(cls.MetaOapg, 'all_of') or + hasattr(cls.MetaOapg, 'one_of') or + hasattr(cls.MetaOapg, 'any_of') ): return None # TODO stop traveling if a cycle is hit - if hasattr(cls.MetaOapg, "all_of"): + if hasattr(cls.MetaOapg, 'all_of'): for allof_cls in cls.MetaOapg.all_of(): discriminated_cls = allof_cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=disc_payload_value - ) + disc_property_name=disc_property_name, disc_payload_value=disc_payload_value) if discriminated_cls is not None: return discriminated_cls - if hasattr(cls.MetaOapg, "one_of"): + if hasattr(cls.MetaOapg, 'one_of'): for oneof_cls in cls.MetaOapg.one_of(): discriminated_cls = oneof_cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=disc_payload_value - ) + disc_property_name=disc_property_name, disc_payload_value=disc_payload_value) if discriminated_cls is not None: return discriminated_cls - if 
hasattr(cls.MetaOapg, "any_of"): + if hasattr(cls.MetaOapg, 'any_of'): for anyof_cls in cls.MetaOapg.any_of(): discriminated_cls = anyof_cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=disc_payload_value - ) + disc_property_name=disc_property_name, disc_payload_value=disc_payload_value) if discriminated_cls is not None: return discriminated_cls return None class DictBase(Discriminable, ValidatorBase): + @classmethod def __validate_arg_presence(cls, arg): """ @@ -1757,10 +1525,10 @@ def __validate_arg_presence(cls, arg): """ seen_required_properties = set() invalid_arguments = [] - required_property_names = getattr(cls.MetaOapg, "required", set()) - additional_properties = getattr(cls.MetaOapg, "additional_properties", UnsetAnyTypeSchema) - properties = getattr(cls.MetaOapg, "properties", {}) - property_annotations = getattr(properties, "__annotations__", {}) + required_property_names = getattr(cls.MetaOapg, 'required', set()) + additional_properties = getattr(cls.MetaOapg, 'additional_properties', UnsetAnyTypeSchema) + properties = getattr(cls.MetaOapg, 'properties', {}) + property_annotations = getattr(properties, '__annotations__', {}) for property_name in arg: if property_name in required_property_names: seen_required_properties.add(property_name) @@ -1778,14 +1546,17 @@ def __validate_arg_presence(cls, arg): cls.__name__, len(missing_required_arguments), "s" if len(missing_required_arguments) > 1 else "", - missing_required_arguments, + missing_required_arguments ) ) if invalid_arguments: invalid_arguments.sort() raise ApiTypeError( "{} was passed {} invalid argument{}: {}".format( - cls.__name__, len(invalid_arguments), "s" if len(invalid_arguments) > 1 else "", invalid_arguments + cls.__name__, + len(invalid_arguments), + "s" if len(invalid_arguments) > 1 else "", + invalid_arguments ) ) @@ -1804,11 +1575,11 @@ def __validate_args(cls, arg, validation_metadata: ValidationMetadata): ApiTypeError - for missing 
required arguments, or for invalid properties """ path_to_schemas = {} - additional_properties = getattr(cls.MetaOapg, "additional_properties", UnsetAnyTypeSchema) - properties = getattr(cls.MetaOapg, "properties", {}) - property_annotations = getattr(properties, "__annotations__", {}) + additional_properties = getattr(cls.MetaOapg, 'additional_properties', UnsetAnyTypeSchema) + properties = getattr(cls.MetaOapg, 'properties', {}) + property_annotations = getattr(properties, '__annotations__', {}) for property_name, value in arg.items(): - path_to_item = validation_metadata.path_to_item + (property_name,) + path_to_item = validation_metadata.path_to_item+(property_name,) if property_name in property_annotations: schema = property_annotations[property_name] elif additional_properties is not NotAnyTypeSchema: @@ -1822,17 +1593,15 @@ def __validate_args(cls, arg, validation_metadata: ValidationMetadata): continue schema = additional_properties else: - raise ApiTypeError( - "Unable to find schema for value={} in class={} at path_to_item={}".format( - value, cls, validation_metadata.path_to_item + (property_name,) - ) - ) + raise ApiTypeError('Unable to find schema for value={} in class={} at path_to_item={}'.format( + value, cls, validation_metadata.path_to_item+(property_name,) + )) schema = cls._get_class_oapg(schema) arg_validation_metadata = ValidationMetadata( from_server=validation_metadata.from_server, configuration=validation_metadata.configuration, path_to_item=path_to_item, - validated_path_to_schemas=validation_metadata.validated_path_to_schemas, + validated_path_to_schemas=validation_metadata.validated_path_to_schemas ) if arg_validation_metadata.validation_ran_earlier(schema): add_deeper_validated_schemas(arg_validation_metadata, path_to_schemas) @@ -1842,31 +1611,31 @@ def __validate_args(cls, arg, validation_metadata: ValidationMetadata): return path_to_schemas @classmethod - def __check_dict_validations(cls, arg, validation_metadata: 
ValidationMetadata): - if not hasattr(cls, "MetaOapg"): + def __check_dict_validations( + cls, + arg, + validation_metadata: ValidationMetadata + ): + if not hasattr(cls, 'MetaOapg'): return - if ( - cls._is_json_validation_enabled_oapg("maxProperties", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "max_properties") - and len(arg) > cls.MetaOapg.max_properties - ): + if (cls._is_json_validation_enabled_oapg('maxProperties', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'max_properties') and + len(arg) > cls.MetaOapg.max_properties): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="number of properties must be less than or equal to", constraint_value=cls.MetaOapg.max_properties, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) - if ( - cls._is_json_validation_enabled_oapg("minProperties", validation_metadata.configuration) - and hasattr(cls.MetaOapg, "min_properties") - and len(arg) < cls.MetaOapg.min_properties - ): + if (cls._is_json_validation_enabled_oapg('minProperties', validation_metadata.configuration) and + hasattr(cls.MetaOapg, 'min_properties') and + len(arg) < cls.MetaOapg.min_properties): cls._raise_validation_error_message_oapg( value=arg, constraint_msg="number of properties must be greater than or equal to", constraint_value=cls.MetaOapg.min_properties, - path_to_item=validation_metadata.path_to_item, + path_to_item=validation_metadata.path_to_item ) @classmethod @@ -1906,15 +1675,14 @@ def _validate_oapg( disc_prop_name = list(discriminator.keys())[0] cls._ensure_discriminator_value_present_oapg(disc_prop_name, validation_metadata, arg) discriminated_cls = cls.get_discriminated_class_oapg( - disc_property_name=disc_prop_name, disc_payload_value=arg[disc_prop_name] - ) + disc_property_name=disc_prop_name, disc_payload_value=arg[disc_prop_name]) if discriminated_cls is None: raise ApiValueError( "Invalid discriminator value was passed in to {}.{} 
Only the values {} are allowed at {}".format( cls.__name__, disc_prop_name, list(discriminator[disc_prop_name].keys()), - validation_metadata.path_to_item + (disc_prop_name,), + validation_metadata.path_to_item + (disc_prop_name,) ) ) updated_vm = ValidationMetadata( @@ -1922,7 +1690,7 @@ def _validate_oapg( from_server=validation_metadata.from_server, path_to_item=validation_metadata.path_to_item, seen_classes=validation_metadata.seen_classes | frozenset({cls}), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas, + validated_path_to_schemas=validation_metadata.validated_path_to_schemas ) if updated_vm.validation_ran_earlier(discriminated_cls): add_deeper_validated_schemas(updated_vm, _path_to_schemas) @@ -1936,7 +1704,7 @@ def _get_properties_oapg( cls, arg: typing.Dict[str, typing.Any], path_to_item: typing.Tuple[typing.Union[str, int], ...], - path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type["Schema"]], + path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Type['Schema']] ): """ DictBase _get_properties_oapg, this is how properties are set @@ -1948,7 +1716,9 @@ def _get_properties_oapg( property_path_to_item = path_to_item + (property_name_js,) property_cls = path_to_schemas[property_path_to_item] new_value = property_cls._get_new_instance_without_conversion_oapg( - value, property_path_to_item, path_to_schemas + value, + property_path_to_item, + path_to_schemas ) dict_items[property_name_js] = new_value @@ -1956,7 +1726,7 @@ def _get_properties_oapg( def __setattr__(self, name: str, value: typing.Any): if not isinstance(self, FileIO): - raise AttributeError("property setting not supported on immutable instances") + raise AttributeError('property setting not supported on immutable instances') def __getattr__(self, name: str): """ @@ -1983,7 +1753,7 @@ def __getitem__(self, name: str): return super().__getattr__(name) return super().__getitem__(name) - def get_item_oapg(self, 
name: str) -> typing.Union["AnyTypeSchema", Unset]: + def get_item_oapg(self, name: str) -> typing.Union['AnyTypeSchema', Unset]: # dict_instance[name] accessor if not isinstance(self, frozendict.frozendict): raise NotImplementedError() @@ -1994,30 +1764,10 @@ def get_item_oapg(self, name: str) -> typing.Union["AnyTypeSchema", Unset]: def cast_to_allowed_types( - arg: typing.Union[ - str, - date, - datetime, - uuid.UUID, - decimal.Decimal, - int, - float, - None, - dict, - frozendict.frozendict, - list, - tuple, - bytes, - Schema, - io.FileIO, - io.BufferedReader, - ], + arg: typing.Union[str, date, datetime, uuid.UUID, decimal.Decimal, int, float, None, dict, frozendict.frozendict, list, tuple, bytes, Schema, io.FileIO, io.BufferedReader], from_server: bool, - validated_path_to_schemas: typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], - typing.Set[typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]], - ], - path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(["args[0]"]), + validated_path_to_schemas: typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]], + path_to_item: typing.Tuple[typing.Union[str, int], ...] = tuple(['args[0]']), ) -> typing.Union[frozendict.frozendict, tuple, decimal.Decimal, str, bytes, BoolClass, NoneClass, FileIO]: """ Casts the input payload arg into the allowed types @@ -2049,18 +1799,11 @@ def cast_to_allowed_types( schema_classes.add(cls) validated_path_to_schemas[path_to_item] = schema_classes - type_error = ApiTypeError( - f"Invalid type. Required value type is str and passed type was {type(arg)} at {path_to_item}" - ) + type_error = ApiTypeError(f"Invalid type. 
Required value type is str and passed type was {type(arg)} at {path_to_item}") if isinstance(arg, str): return str(arg) elif isinstance(arg, (dict, frozendict.frozendict)): - return frozendict.frozendict( - { - key: cast_to_allowed_types(val, from_server, validated_path_to_schemas, path_to_item + (key,)) - for key, val in arg.items() - } - ) + return frozendict.frozendict({key: cast_to_allowed_types(val, from_server, validated_path_to_schemas, path_to_item + (key,)) for key, val in arg.items()}) elif isinstance(arg, (bool, BoolClass)): """ this check must come before isinstance(arg, (int, float)) @@ -2076,15 +1819,10 @@ def cast_to_allowed_types( if decimal_from_float.as_integer_ratio()[1] == 1: # 9.0 -> Decimal('9.0') # 3.4028234663852886e+38 -> Decimal('340282346638528859811704183484516925440.0') - return decimal.Decimal(str(decimal_from_float) + ".0") + return decimal.Decimal(str(decimal_from_float)+'.0') return decimal_from_float elif isinstance(arg, (tuple, list)): - return tuple( - [ - cast_to_allowed_types(item, from_server, validated_path_to_schemas, path_to_item + (i,)) - for i, item in enumerate(arg) - ] - ) + return tuple([cast_to_allowed_types(item, from_server, validated_path_to_schemas, path_to_item + (i,)) for i, item in enumerate(arg)]) elif isinstance(arg, (none_type, NoneClass)): return NoneClass.NONE elif isinstance(arg, (date, datetime)): @@ -2101,10 +1839,11 @@ def cast_to_allowed_types( return bytes(arg) elif isinstance(arg, (io.FileIO, io.BufferedReader)): return FileIO(arg) - raise ValueError("Invalid type passed in got input={} type={}".format(arg, type(arg))) + raise ValueError('Invalid type passed in got input={} type={}'.format(arg, type(arg))) class ComposedBase(Discriminable): + @classmethod def __get_allof_classes(cls, arg, validation_metadata: ValidationMetadata): path_to_schemas = defaultdict(set) @@ -2154,7 +1893,12 @@ def __get_oneof_class( return path_to_schemas @classmethod - def __get_anyof_classes(cls, arg, discriminated_cls, 
validation_metadata: ValidationMetadata): + def __get_anyof_classes( + cls, + arg, + discriminated_cls, + validation_metadata: ValidationMetadata + ): anyof_classes = [] path_to_schemas = defaultdict(set) for anyof_cls in cls.MetaOapg.any_of(): @@ -2183,10 +1927,7 @@ def _validate_oapg( cls, arg, validation_metadata: ValidationMetadata, - ) -> typing.Dict[ - typing.Tuple[typing.Union[str, int], ...], - typing.Set[typing.Union["Schema", str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]], - ]: + ) -> typing.Dict[typing.Tuple[typing.Union[str, int], ...], typing.Set[typing.Union['Schema', str, decimal.Decimal, BoolClass, NoneClass, frozendict.frozendict, tuple]]]: """ ComposedBase _validate_oapg We return dynamic classes of different bases depending upon the inputs @@ -2210,12 +1951,12 @@ def _validate_oapg( from_server=validation_metadata.from_server, path_to_item=validation_metadata.path_to_item, seen_classes=validation_metadata.seen_classes | frozenset({cls}), - validated_path_to_schemas=validation_metadata.validated_path_to_schemas, + validated_path_to_schemas=validation_metadata.validated_path_to_schemas ) # process composed schema discriminator = None - if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "discriminator"): + if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'discriminator'): discriminator = cls.MetaOapg.discriminator() discriminated_cls = None if discriminator and arg and isinstance(arg, frozendict.frozendict): @@ -2223,8 +1964,7 @@ def _validate_oapg( cls._ensure_discriminator_value_present_oapg(disc_property_name, updated_vm, arg) # get discriminated_cls by looking at the dict in the current class discriminated_cls = cls.get_discriminated_class_oapg( - disc_property_name=disc_property_name, disc_payload_value=arg[disc_property_name] - ) + disc_property_name=disc_property_name, disc_payload_value=arg[disc_property_name]) if discriminated_cls is None: raise ApiValueError( "Invalid discriminator value '{}' was passed 
in to {}.{} Only the values {} are allowed at {}".format( @@ -2232,25 +1972,29 @@ def _validate_oapg( cls.__name__, disc_property_name, list(discriminator[disc_property_name].keys()), - updated_vm.path_to_item + (disc_property_name,), + updated_vm.path_to_item + (disc_property_name,) ) ) - if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "all_of"): + if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'all_of'): other_path_to_schemas = cls.__get_allof_classes(arg, validation_metadata=updated_vm) update(path_to_schemas, other_path_to_schemas) - if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "one_of"): + if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'one_of'): other_path_to_schemas = cls.__get_oneof_class( - arg, discriminated_cls=discriminated_cls, validation_metadata=updated_vm + arg, + discriminated_cls=discriminated_cls, + validation_metadata=updated_vm ) update(path_to_schemas, other_path_to_schemas) - if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "any_of"): + if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'any_of'): other_path_to_schemas = cls.__get_anyof_classes( - arg, discriminated_cls=discriminated_cls, validation_metadata=updated_vm + arg, + discriminated_cls=discriminated_cls, + validation_metadata=updated_vm ) update(path_to_schemas, other_path_to_schemas) not_cls = None - if hasattr(cls, "MetaOapg") and hasattr(cls.MetaOapg, "not_schema"): + if hasattr(cls, 'MetaOapg') and hasattr(cls.MetaOapg, 'not_schema'): not_cls = cls.MetaOapg.not_schema not_cls = cls._get_class_oapg(not_cls) if not_cls: @@ -2289,29 +2033,37 @@ class ComposedSchema( BoolBase, NoneBase, Schema, - NoneFrozenDictTupleStrDecimalBoolMixin, + NoneFrozenDictTupleStrDecimalBoolMixin ): @classmethod def from_openapi_data_oapg(cls, *args: typing.Any, _configuration: typing.Optional[Configuration] = None, **kwargs): if not args: if not kwargs: - raise ApiTypeError("{} is missing required input data in args or kwargs".format(cls.__name__)) - args = 
(kwargs,) + raise ApiTypeError('{} is missing required input data in args or kwargs'.format(cls.__name__)) + args = (kwargs, ) return super().from_openapi_data_oapg(args[0], _configuration=_configuration) -class ListSchema(ListBase, Schema, TupleMixin): +class ListSchema( + ListBase, + Schema, + TupleMixin +): + @classmethod - def from_openapi_data_oapg( - cls, arg: typing.List[typing.Any], _configuration: typing.Optional[Configuration] = None - ): + def from_openapi_data_oapg(cls, arg: typing.List[typing.Any], _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) def __new__(cls, _arg: typing.Union[typing.List[typing.Any], typing.Tuple[typing.Any]], **kwargs: Configuration): return super().__new__(cls, _arg, **kwargs) -class NoneSchema(NoneBase, Schema, NoneMixin): +class NoneSchema( + NoneBase, + Schema, + NoneMixin +): + @classmethod def from_openapi_data_oapg(cls, arg: None, _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) @@ -2320,16 +2072,18 @@ def __new__(cls, _arg: None, **kwargs: Configuration): return super().__new__(cls, _arg, **kwargs) -class NumberSchema(NumberBase, Schema, DecimalMixin): +class NumberSchema( + NumberBase, + Schema, + DecimalMixin +): """ This is used for type: number with no format Both integers AND floats are accepted """ @classmethod - def from_openapi_data_oapg( - cls, arg: typing.Union[int, float], _configuration: typing.Optional[Configuration] = None - ): + def from_openapi_data_oapg(cls, arg: typing.Union[int, float], _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) def __new__(cls, _arg: typing.Union[decimal.Decimal, int, float], **kwargs: Configuration): @@ -2348,6 +2102,7 @@ def as_int_oapg(self) -> int: @classmethod def __validate_format(cls, arg: typing.Optional[decimal.Decimal], 
validation_metadata: ValidationMetadata): if isinstance(arg, decimal.Decimal): + denominator = arg.as_integer_ratio()[-1] if denominator != 1: raise ApiValueError( @@ -2369,6 +2124,7 @@ def _validate_oapg( class IntSchema(IntBase, NumberSchema): + @classmethod def from_openapi_data_oapg(cls, arg: int, _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) @@ -2402,7 +2158,10 @@ def _validate_oapg( return super()._validate_oapg(arg, validation_metadata=validation_metadata) -class Int32Schema(Int32Base, IntSchema): +class Int32Schema( + Int32Base, + IntSchema +): pass @@ -2431,13 +2190,16 @@ def _validate_oapg( return super()._validate_oapg(arg, validation_metadata=validation_metadata) -class Int64Schema(Int64Base, IntSchema): +class Int64Schema( + Int64Base, + IntSchema +): pass class Float32Base: - __inclusive_minimum = decimal.Decimal(-3.4028234663852886e38) - __inclusive_maximum = decimal.Decimal(3.4028234663852886e38) + __inclusive_minimum = decimal.Decimal(-3.4028234663852886e+38) + __inclusive_maximum = decimal.Decimal(3.4028234663852886e+38) @classmethod def __validate_format(cls, arg: typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): @@ -2460,15 +2222,19 @@ def _validate_oapg( return super()._validate_oapg(arg, validation_metadata=validation_metadata) -class Float32Schema(Float32Base, NumberSchema): +class Float32Schema( + Float32Base, + NumberSchema +): + @classmethod def from_openapi_data_oapg(cls, arg: float, _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) class Float64Base: - __inclusive_minimum = decimal.Decimal(-1.7976931348623157e308) - __inclusive_maximum = decimal.Decimal(1.7976931348623157e308) + __inclusive_minimum = decimal.Decimal(-1.7976931348623157E+308) + __inclusive_maximum = decimal.Decimal(1.7976931348623157E+308) @classmethod def __validate_format(cls, arg: 
typing.Optional[decimal.Decimal], validation_metadata: ValidationMetadata): @@ -2490,15 +2256,22 @@ def _validate_oapg( cls.__validate_format(arg, validation_metadata=validation_metadata) return super()._validate_oapg(arg, validation_metadata=validation_metadata) +class Float64Schema( + Float64Base, + NumberSchema +): -class Float64Schema(Float64Base, NumberSchema): @classmethod def from_openapi_data_oapg(cls, arg: float, _configuration: typing.Optional[Configuration] = None): # todo check format return super().from_openapi_data_oapg(arg, _configuration=_configuration) -class StrSchema(StrBase, Schema, StrMixin): +class StrSchema( + StrBase, + Schema, + StrMixin +): """ date + datetime string types must inherit from this class That is because one can validate a str payload as both: @@ -2507,7 +2280,7 @@ class StrSchema(StrBase, Schema, StrMixin): """ @classmethod - def from_openapi_data_oapg(cls, arg: str, _configuration: typing.Optional[Configuration] = None) -> "StrSchema": + def from_openapi_data_oapg(cls, arg: str, _configuration: typing.Optional[Configuration] = None) -> 'StrSchema': return super().from_openapi_data_oapg(arg, _configuration=_configuration) def __new__(cls, _arg: typing.Union[str, date, datetime, uuid.UUID], **kwargs: Configuration): @@ -2515,21 +2288,25 @@ def __new__(cls, _arg: typing.Union[str, date, datetime, uuid.UUID], **kwargs: C class UUIDSchema(UUIDBase, StrSchema): + def __new__(cls, _arg: typing.Union[str, uuid.UUID], **kwargs: Configuration): return super().__new__(cls, _arg, **kwargs) class DateSchema(DateBase, StrSchema): + def __new__(cls, _arg: typing.Union[str, date], **kwargs: Configuration): return super().__new__(cls, _arg, **kwargs) class DateTimeSchema(DateTimeBase, StrSchema): + def __new__(cls, _arg: typing.Union[str, datetime], **kwargs: Configuration): return super().__new__(cls, _arg, **kwargs) class DecimalSchema(DecimalBase, StrSchema): + def __new__(cls, _arg: str, **kwargs: Configuration): """ Note: Decimals may 
not be passed in because cast_to_allowed_types is only invoked once for payloads @@ -2542,16 +2319,21 @@ def __new__(cls, _arg: str, **kwargs: Configuration): return super().__new__(cls, _arg, **kwargs) -class BytesSchema(Schema, BytesMixin): +class BytesSchema( + Schema, + BytesMixin +): """ this class will subclass bytes and is immutable """ - def __new__(cls, _arg: bytes, **kwargs: Configuration): return super(Schema, cls).__new__(cls, _arg) -class FileSchema(Schema, FileMixin): +class FileSchema( + Schema, + FileMixin +): """ This class is NOT immutable Dynamic classes are built using it for example when AnyType allows in binary data @@ -2577,7 +2359,12 @@ class BinaryBase: pass -class BinarySchema(ComposedBase, BinaryBase, Schema, BinaryMixin): +class BinarySchema( + ComposedBase, + BinaryBase, + Schema, + BinaryMixin +): class MetaOapg: @staticmethod def one_of(): @@ -2590,7 +2377,12 @@ def __new__(cls, _arg: typing.Union[io.FileIO, io.BufferedReader, bytes], **kwar return super().__new__(cls, _arg) -class BoolSchema(BoolBase, Schema, BoolMixin): +class BoolSchema( + BoolBase, + Schema, + BoolMixin +): + @classmethod def from_openapi_data_oapg(cls, arg: bool, _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) @@ -2600,7 +2392,14 @@ def __new__(cls, _arg: bool, **kwargs: ValidationMetadata): class AnyTypeSchema( - DictBase, ListBase, NumberBase, StrBase, BoolBase, NoneBase, Schema, NoneFrozenDictTupleStrDecimalBoolFileBytesMixin + DictBase, + ListBase, + NumberBase, + StrBase, + BoolBase, + NoneBase, + Schema, + NoneFrozenDictTupleStrDecimalBoolFileBytesMixin ): # Python representation of a schema defined as true or {} pass @@ -2627,7 +2426,7 @@ def __new__( cls, *_args, _configuration: typing.Optional[Configuration] = None, - ) -> "NotAnyTypeSchema": + ) -> 'NotAnyTypeSchema': return super().__new__( cls, *_args, @@ -2635,35 +2434,16 @@ def __new__( ) -class DictSchema(DictBase, 
Schema, FrozenDictMixin): +class DictSchema( + DictBase, + Schema, + FrozenDictMixin +): @classmethod - def from_openapi_data_oapg( - cls, arg: typing.Dict[str, typing.Any], _configuration: typing.Optional[Configuration] = None - ): + def from_openapi_data_oapg(cls, arg: typing.Dict[str, typing.Any], _configuration: typing.Optional[Configuration] = None): return super().from_openapi_data_oapg(arg, _configuration=_configuration) - def __new__( - cls, - *_args: typing.Union[dict, frozendict.frozendict], - **kwargs: typing.Union[ - dict, - frozendict.frozendict, - list, - tuple, - decimal.Decimal, - float, - int, - str, - date, - datetime, - bool, - None, - bytes, - Schema, - Unset, - ValidationMetadata, - ], - ): + def __new__(cls, *_args: typing.Union[dict, frozendict.frozendict], **kwargs: typing.Union[dict, frozendict.frozendict, list, tuple, decimal.Decimal, float, int, str, date, datetime, bool, None, bytes, Schema, Unset, ValidationMetadata]): return super().__new__(cls, *_args, **kwargs) @@ -2672,7 +2452,8 @@ def __new__( @functools.lru_cache() def get_new_class( - class_name: str, bases: typing.Tuple[typing.Type[typing.Union[Schema, typing.Any]], ...] + class_name: str, + bases: typing.Tuple[typing.Type[typing.Union[Schema, typing.Any]], ...] 
) -> typing.Type[Schema]: """ Returns a new class that is made with the subclass bases @@ -2686,4 +2467,4 @@ def get_new_class( def log_cache_usage(cache_fn): if LOG_CACHE_USAGE: - print(cache_fn.__name__, cache_fn.cache_info()) + print(cache_fn.__name__, cache_fn.cache_info()) \ No newline at end of file diff --git a/launch/api_client/test/__init__.py b/launch/api_client/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/launch/api_client/test/test_models/__init__.py b/launch/api_client/test/test_models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/launch/api_client/test/test_models/test_annotation.py b/launch/api_client/test/test_models/test_annotation.py new file mode 100644 index 00000000..16171d1c --- /dev/null +++ b/launch/api_client/test/test_models/test_annotation.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.annotation import Annotation + + +class TestAnnotation(unittest.TestCase): + """Annotation unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_audio.py b/launch/api_client/test/test_models/test_audio.py new file mode 100644 index 00000000..48f48bec --- /dev/null +++ b/launch/api_client/test/test_models/test_audio.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from 
launch.api_client import configuration +from launch.api_client.model.audio import Audio + + +class TestAudio(unittest.TestCase): + """Audio unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_audio1.py b/launch/api_client/test/test_models/test_audio1.py new file mode 100644 index 00000000..1f854bd7 --- /dev/null +++ b/launch/api_client/test/test_models/test_audio1.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.audio1 import Audio1 + + +class TestAudio1(unittest.TestCase): + """Audio1 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_audio2.py b/launch/api_client/test/test_models/test_audio2.py new file mode 100644 index 00000000..b2f3bfc0 --- /dev/null +++ b/launch/api_client/test/test_models/test_audio2.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.audio2 import Audio2 + + +class TestAudio2(unittest.TestCase): + """Audio2 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_completions_job.py 
b/launch/api_client/test/test_models/test_batch_completions_job.py new file mode 100644 index 00000000..c260fbb7 --- /dev/null +++ b/launch/api_client/test/test_models/test_batch_completions_job.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.batch_completions_job import BatchCompletionsJob + + +class TestBatchCompletionsJob(unittest.TestCase): + """BatchCompletionsJob unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_completions_job_status.py b/launch/api_client/test/test_models/test_batch_completions_job_status.py new file mode 100644 index 00000000..fd6a44e0 --- /dev/null +++ b/launch/api_client/test/test_models/test_batch_completions_job_status.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.batch_completions_job_status import ( + BatchCompletionsJobStatus, +) + + +class TestBatchCompletionsJobStatus(unittest.TestCase): + """BatchCompletionsJobStatus unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_completions_model_config.py b/launch/api_client/test/test_models/test_batch_completions_model_config.py new file mode 100644 
index 00000000..6760fe00 --- /dev/null +++ b/launch/api_client/test/test_models/test_batch_completions_model_config.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.batch_completions_model_config import ( + BatchCompletionsModelConfig, +) + + +class TestBatchCompletionsModelConfig(unittest.TestCase): + """BatchCompletionsModelConfig unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_job_serialization_format.py b/launch/api_client/test/test_models/test_batch_job_serialization_format.py new file mode 100644 index 00000000..8aee9056 --- /dev/null +++ b/launch/api_client/test/test_models/test_batch_job_serialization_format.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.batch_job_serialization_format import ( + BatchJobSerializationFormat, +) + + +class TestBatchJobSerializationFormat(unittest.TestCase): + """BatchJobSerializationFormat unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_batch_job_status.py b/launch/api_client/test/test_models/test_batch_job_status.py new file mode 100644 index 00000000..9df0489e --- /dev/null +++ 
b/launch/api_client/test/test_models/test_batch_job_status.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.batch_job_status import BatchJobStatus + + +class TestBatchJobStatus(unittest.TestCase): + """BatchJobStatus unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_callback_auth.py b/launch/api_client/test/test_models/test_callback_auth.py new file mode 100644 index 00000000..2db08713 --- /dev/null +++ b/launch/api_client/test/test_models/test_callback_auth.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.callback_auth import CallbackAuth + + +class TestCallbackAuth(unittest.TestCase): + """CallbackAuth unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_callback_basic_auth.py b/launch/api_client/test/test_models/test_callback_basic_auth.py new file mode 100644 index 00000000..f4cd9c54 --- /dev/null +++ b/launch/api_client/test/test_models/test_callback_basic_auth.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + 
+ The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.callback_basic_auth import CallbackBasicAuth + + +class TestCallbackBasicAuth(unittest.TestCase): + """CallbackBasicAuth unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_callbackm_tls_auth.py b/launch/api_client/test/test_models/test_callbackm_tls_auth.py new file mode 100644 index 00000000..90218ce5 --- /dev/null +++ b/launch/api_client/test/test_models/test_callbackm_tls_auth.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.callbackm_tls_auth import CallbackmTLSAuth + + +class TestCallbackmTLSAuth(unittest.TestCase): + """CallbackmTLSAuth unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_cancel_batch_completions_v2_response.py b/launch/api_client/test/test_models/test_cancel_batch_completions_v2_response.py new file mode 100644 index 00000000..f40b55b8 --- /dev/null +++ b/launch/api_client/test/test_models/test_cancel_batch_completions_v2_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client 
+from launch.api_client import configuration +from launch.api_client.model.cancel_batch_completions_v2_response import ( + CancelBatchCompletionsV2Response, +) + + +class TestCancelBatchCompletionsV2Response(unittest.TestCase): + """CancelBatchCompletionsV2Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_cancel_fine_tune_response.py b/launch/api_client/test/test_models/test_cancel_fine_tune_response.py new file mode 100644 index 00000000..581efacb --- /dev/null +++ b/launch/api_client/test/test_models/test_cancel_fine_tune_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.cancel_fine_tune_response import ( + CancelFineTuneResponse, +) + + +class TestCancelFineTuneResponse(unittest.TestCase): + """CancelFineTuneResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_function_call_option.py b/launch/api_client/test/test_models/test_chat_completion_function_call_option.py new file mode 100644 index 00000000..f9d6e187 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_function_call_option.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from 
launch.api_client import configuration +from launch.api_client.model.chat_completion_function_call_option import ( + ChatCompletionFunctionCallOption, +) + + +class TestChatCompletionFunctionCallOption(unittest.TestCase): + """ChatCompletionFunctionCallOption unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_functions.py b/launch/api_client/test/test_models/test_chat_completion_functions.py new file mode 100644 index 00000000..ff47359b --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_functions.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_functions import ( + ChatCompletionFunctions, +) + + +class TestChatCompletionFunctions(unittest.TestCase): + """ChatCompletionFunctions unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_call.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_call.py new file mode 100644 index 00000000..2aefecbf --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_message_tool_call.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import 
configuration +from launch.api_client.model.chat_completion_message_tool_call import ( + ChatCompletionMessageToolCall, +) + + +class TestChatCompletionMessageToolCall(unittest.TestCase): + """ChatCompletionMessageToolCall unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_call_chunk.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_call_chunk.py new file mode 100644 index 00000000..f0e15c18 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_message_tool_call_chunk.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_message_tool_call_chunk import ( + ChatCompletionMessageToolCallChunk, +) + + +class TestChatCompletionMessageToolCallChunk(unittest.TestCase): + """ChatCompletionMessageToolCallChunk unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_input.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_input.py new file mode 100644 index 00000000..0ccacadd --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_input.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + 
+import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_message_tool_calls_input import ( + ChatCompletionMessageToolCallsInput, +) + + +class TestChatCompletionMessageToolCallsInput(unittest.TestCase): + """ChatCompletionMessageToolCallsInput unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_output.py b/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_output.py new file mode 100644 index 00000000..98544a22 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_message_tool_calls_output.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_message_tool_calls_output import ( + ChatCompletionMessageToolCallsOutput, +) + + +class TestChatCompletionMessageToolCallsOutput(unittest.TestCase): + """ChatCompletionMessageToolCallsOutput unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_named_tool_choice.py b/launch/api_client/test/test_models/test_chat_completion_named_tool_choice.py new file mode 100644 index 00000000..b8472a2b --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_named_tool_choice.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The 
version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_named_tool_choice import ( + ChatCompletionNamedToolChoice, +) + + +class TestChatCompletionNamedToolChoice(unittest.TestCase): + """ChatCompletionNamedToolChoice unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_assistant_message.py b/launch/api_client/test/test_models/test_chat_completion_request_assistant_message.py new file mode 100644 index 00000000..1451e65d --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_assistant_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_assistant_message import ( + ChatCompletionRequestAssistantMessage, +) + + +class TestChatCompletionRequestAssistantMessage(unittest.TestCase): + """ChatCompletionRequestAssistantMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_assistant_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_assistant_message_content_part.py new file mode 100644 index 00000000..d2aaf7c5 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_assistant_message_content_part.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch 
+ + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_assistant_message_content_part import ( + ChatCompletionRequestAssistantMessageContentPart, +) + + +class TestChatCompletionRequestAssistantMessageContentPart(unittest.TestCase): + """ChatCompletionRequestAssistantMessageContentPart unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_developer_message.py b/launch/api_client/test/test_models/test_chat_completion_request_developer_message.py new file mode 100644 index 00000000..c39bee08 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_developer_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_developer_message import ( + ChatCompletionRequestDeveloperMessage, +) + + +class TestChatCompletionRequestDeveloperMessage(unittest.TestCase): + """ChatCompletionRequestDeveloperMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_function_message.py b/launch/api_client/test/test_models/test_chat_completion_request_function_message.py new file mode 100644 index 
00000000..ce552c33 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_function_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_function_message import ( + ChatCompletionRequestFunctionMessage, +) + + +class TestChatCompletionRequestFunctionMessage(unittest.TestCase): + """ChatCompletionRequestFunctionMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message.py b/launch/api_client/test/test_models/test_chat_completion_request_message.py new file mode 100644 index 00000000..5d5f9657 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_message import ( + ChatCompletionRequestMessage, +) + + +class TestChatCompletionRequestMessage(unittest.TestCase): + """ChatCompletionRequestMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_audio.py 
b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_audio.py new file mode 100644 index 00000000..94c867f4 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_audio.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_message_content_part_audio import ( + ChatCompletionRequestMessageContentPartAudio, +) + + +class TestChatCompletionRequestMessageContentPartAudio(unittest.TestCase): + """ChatCompletionRequestMessageContentPartAudio unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_file.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_file.py new file mode 100644 index 00000000..f41daf86 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_file.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_message_content_part_file import ( + ChatCompletionRequestMessageContentPartFile, +) + + +class TestChatCompletionRequestMessageContentPartFile(unittest.TestCase): + 
"""ChatCompletionRequestMessageContentPartFile unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_image.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_image.py new file mode 100644 index 00000000..eec5e97b --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_image.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_message_content_part_image import ( + ChatCompletionRequestMessageContentPartImage, +) + + +class TestChatCompletionRequestMessageContentPartImage(unittest.TestCase): + """ChatCompletionRequestMessageContentPartImage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_refusal.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_refusal.py new file mode 100644 index 00000000..6f3a05ac --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_refusal.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client 
import configuration +from launch.api_client.model.chat_completion_request_message_content_part_refusal import ( + ChatCompletionRequestMessageContentPartRefusal, +) + + +class TestChatCompletionRequestMessageContentPartRefusal(unittest.TestCase): + """ChatCompletionRequestMessageContentPartRefusal unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_text.py b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_text.py new file mode 100644 index 00000000..2e527875 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_message_content_part_text.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_message_content_part_text import ( + ChatCompletionRequestMessageContentPartText, +) + + +class TestChatCompletionRequestMessageContentPartText(unittest.TestCase): + """ChatCompletionRequestMessageContentPartText unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_system_message.py b/launch/api_client/test/test_models/test_chat_completion_request_system_message.py new file mode 100644 index 00000000..d6537bd6 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_system_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_system_message import ( + ChatCompletionRequestSystemMessage, +) + + +class TestChatCompletionRequestSystemMessage(unittest.TestCase): + """ChatCompletionRequestSystemMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_system_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_system_message_content_part.py new file mode 100644 index 00000000..549c3f7b --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_system_message_content_part.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_system_message_content_part import ( + ChatCompletionRequestSystemMessageContentPart, +) + + +class TestChatCompletionRequestSystemMessageContentPart(unittest.TestCase): + """ChatCompletionRequestSystemMessageContentPart unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_tool_message.py b/launch/api_client/test/test_models/test_chat_completion_request_tool_message.py new file mode 100644 index 00000000..d95444ab --- /dev/null +++ 
b/launch/api_client/test/test_models/test_chat_completion_request_tool_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_tool_message import ( + ChatCompletionRequestToolMessage, +) + + +class TestChatCompletionRequestToolMessage(unittest.TestCase): + """ChatCompletionRequestToolMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_tool_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_tool_message_content_part.py new file mode 100644 index 00000000..46d2afa1 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_tool_message_content_part.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_tool_message_content_part import ( + ChatCompletionRequestToolMessageContentPart, +) + + +class TestChatCompletionRequestToolMessageContentPart(unittest.TestCase): + """ChatCompletionRequestToolMessageContentPart unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_user_message.py 
b/launch/api_client/test/test_models/test_chat_completion_request_user_message.py new file mode 100644 index 00000000..06bc068a --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_user_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_user_message import ( + ChatCompletionRequestUserMessage, +) + + +class TestChatCompletionRequestUserMessage(unittest.TestCase): + """ChatCompletionRequestUserMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_request_user_message_content_part.py b/launch/api_client/test/test_models/test_chat_completion_request_user_message_content_part.py new file mode 100644 index 00000000..7bc9b522 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_request_user_message_content_part.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_request_user_message_content_part import ( + ChatCompletionRequestUserMessageContentPart, +) + + +class TestChatCompletionRequestUserMessageContentPart(unittest.TestCase): + """ChatCompletionRequestUserMessageContentPart unit test stubs""" + + _configuration = configuration.Configuration() + + 
+if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_response_message.py b/launch/api_client/test/test_models/test_chat_completion_response_message.py new file mode 100644 index 00000000..f9c75f59 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_response_message.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_response_message import ( + ChatCompletionResponseMessage, +) + + +class TestChatCompletionResponseMessage(unittest.TestCase): + """ChatCompletionResponseMessage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_stream_options.py b/launch/api_client/test/test_models/test_chat_completion_stream_options.py new file mode 100644 index 00000000..e52f49a2 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_stream_options.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_stream_options import ( + ChatCompletionStreamOptions, +) + + +class TestChatCompletionStreamOptions(unittest.TestCase): + """ChatCompletionStreamOptions unit test stubs""" + + _configuration = configuration.Configuration() + + +if 
__name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_stream_response_delta.py b/launch/api_client/test/test_models/test_chat_completion_stream_response_delta.py new file mode 100644 index 00000000..8456fdee --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_stream_response_delta.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_stream_response_delta import ( + ChatCompletionStreamResponseDelta, +) + + +class TestChatCompletionStreamResponseDelta(unittest.TestCase): + """ChatCompletionStreamResponseDelta unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_token_logprob.py b/launch/api_client/test/test_models/test_chat_completion_token_logprob.py new file mode 100644 index 00000000..65e63e6a --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_token_logprob.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_token_logprob import ( + ChatCompletionTokenLogprob, +) + + +class TestChatCompletionTokenLogprob(unittest.TestCase): + """ChatCompletionTokenLogprob unit test stubs""" + + _configuration = 
configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_tool.py b/launch/api_client/test/test_models/test_chat_completion_tool.py new file mode 100644 index 00000000..a7fe123e --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_tool.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_tool import ChatCompletionTool + + +class TestChatCompletionTool(unittest.TestCase): + """ChatCompletionTool unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_tool_choice_option.py b/launch/api_client/test/test_models/test_chat_completion_tool_choice_option.py new file mode 100644 index 00000000..9330c8e6 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_tool_choice_option.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_tool_choice_option import ( + ChatCompletionToolChoiceOption, +) + + +class TestChatCompletionToolChoiceOption(unittest.TestCase): + """ChatCompletionToolChoiceOption unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + 
unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_v2_request.py b/launch/api_client/test/test_models/test_chat_completion_v2_request.py new file mode 100644 index 00000000..6117d307 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_v2_request import ( + ChatCompletionV2Request, +) + + +class TestChatCompletionV2Request(unittest.TestCase): + """ChatCompletionV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_chat_completion_v2_stream_error_chunk.py b/launch/api_client/test/test_models/test_chat_completion_v2_stream_error_chunk.py new file mode 100644 index 00000000..5ed2c7e0 --- /dev/null +++ b/launch/api_client/test/test_models/test_chat_completion_v2_stream_error_chunk.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.chat_completion_v2_stream_error_chunk import ( + ChatCompletionV2StreamErrorChunk, +) + + +class TestChatCompletionV2StreamErrorChunk(unittest.TestCase): + """ChatCompletionV2StreamErrorChunk unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + 
unittest.main() diff --git a/launch/api_client/test/test_models/test_choice.py b/launch/api_client/test/test_models/test_choice.py new file mode 100644 index 00000000..853f8b63 --- /dev/null +++ b/launch/api_client/test/test_models/test_choice.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.choice import Choice + + +class TestChoice(unittest.TestCase): + """Choice unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_choice1.py b/launch/api_client/test/test_models/test_choice1.py new file mode 100644 index 00000000..2f74ca27 --- /dev/null +++ b/launch/api_client/test/test_models/test_choice1.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.choice1 import Choice1 + + +class TestChoice1(unittest.TestCase): + """Choice1 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_choice2.py b/launch/api_client/test/test_models/test_choice2.py new file mode 100644 index 00000000..fdc2936e --- /dev/null +++ b/launch/api_client/test/test_models/test_choice2.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.choice2 import Choice2 + + +class TestChoice2(unittest.TestCase): + """Choice2 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_clone_model_bundle_v1_request.py b/launch/api_client/test/test_models/test_clone_model_bundle_v1_request.py new file mode 100644 index 00000000..b69c30dc --- /dev/null +++ b/launch/api_client/test/test_models/test_clone_model_bundle_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.clone_model_bundle_v1_request import ( + CloneModelBundleV1Request, +) + + +class TestCloneModelBundleV1Request(unittest.TestCase): + """CloneModelBundleV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_clone_model_bundle_v2_request.py b/launch/api_client/test/test_models/test_clone_model_bundle_v2_request.py new file mode 100644 index 00000000..7f31450d --- /dev/null +++ b/launch/api_client/test/test_models/test_clone_model_bundle_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + 
Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.clone_model_bundle_v2_request import ( + CloneModelBundleV2Request, +) + + +class TestCloneModelBundleV2Request(unittest.TestCase): + """CloneModelBundleV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_cloudpickle_artifact_flavor.py b/launch/api_client/test/test_models/test_cloudpickle_artifact_flavor.py new file mode 100644 index 00000000..e5696f3a --- /dev/null +++ b/launch/api_client/test/test_models/test_cloudpickle_artifact_flavor.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.cloudpickle_artifact_flavor import ( + CloudpickleArtifactFlavor, +) + + +class TestCloudpickleArtifactFlavor(unittest.TestCase): + """CloudpickleArtifactFlavor unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_output.py b/launch/api_client/test/test_models/test_completion_output.py new file mode 100644 index 00000000..b402a68f --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_output.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import 
launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_output import CompletionOutput + + +class TestCompletionOutput(unittest.TestCase): + """CompletionOutput unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_stream_output.py b/launch/api_client/test/test_models/test_completion_stream_output.py new file mode 100644 index 00000000..fa8aa3ca --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_stream_output.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_stream_output import ( + CompletionStreamOutput, +) + + +class TestCompletionStreamOutput(unittest.TestCase): + """CompletionStreamOutput unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_stream_v1_request.py b/launch/api_client/test/test_models/test_completion_stream_v1_request.py new file mode 100644 index 00000000..d6d9ea03 --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_stream_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_stream_v1_request 
import ( + CompletionStreamV1Request, +) + + +class TestCompletionStreamV1Request(unittest.TestCase): + """CompletionStreamV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_stream_v1_response.py b/launch/api_client/test/test_models/test_completion_stream_v1_response.py new file mode 100644 index 00000000..559c0ae0 --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_stream_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_stream_v1_response import ( + CompletionStreamV1Response, +) + + +class TestCompletionStreamV1Response(unittest.TestCase): + """CompletionStreamV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_sync_v1_request.py b/launch/api_client/test/test_models/test_completion_sync_v1_request.py new file mode 100644 index 00000000..116293d3 --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_sync_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_sync_v1_request import ( + CompletionSyncV1Request, +) + + +class 
TestCompletionSyncV1Request(unittest.TestCase): + """CompletionSyncV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_sync_v1_response.py b/launch/api_client/test/test_models/test_completion_sync_v1_response.py new file mode 100644 index 00000000..15d6fa17 --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_sync_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_sync_v1_response import ( + CompletionSyncV1Response, +) + + +class TestCompletionSyncV1Response(unittest.TestCase): + """CompletionSyncV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_tokens_details.py b/launch/api_client/test/test_models/test_completion_tokens_details.py new file mode 100644 index 00000000..1726e781 --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_tokens_details.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_tokens_details import ( + CompletionTokensDetails, +) + + +class TestCompletionTokensDetails(unittest.TestCase): + """CompletionTokensDetails 
unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_usage.py b/launch/api_client/test/test_models/test_completion_usage.py new file mode 100644 index 00000000..f328d79e --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_usage.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_usage import CompletionUsage + + +class TestCompletionUsage(unittest.TestCase): + """CompletionUsage unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_completion_v2_request.py b/launch/api_client/test/test_models/test_completion_v2_request.py new file mode 100644 index 00000000..ea95737a --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_v2_request.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_v2_request import CompletionV2Request + + +class TestCompletionV2Request(unittest.TestCase): + """CompletionV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git 
a/launch/api_client/test/test_models/test_completion_v2_stream_error_chunk.py b/launch/api_client/test/test_models/test_completion_v2_stream_error_chunk.py new file mode 100644 index 00000000..588cc489 --- /dev/null +++ b/launch/api_client/test/test_models/test_completion_v2_stream_error_chunk.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.completion_v2_stream_error_chunk import ( + CompletionV2StreamErrorChunk, +) + + +class TestCompletionV2StreamErrorChunk(unittest.TestCase): + """CompletionV2StreamErrorChunk unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_content.py b/launch/api_client/test/test_models/test_content.py new file mode 100644 index 00000000..086be440 --- /dev/null +++ b/launch/api_client/test/test_models/test_content.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.content import Content + + +class TestContent(unittest.TestCase): + """Content unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_content1.py b/launch/api_client/test/test_models/test_content1.py new file mode 100644 index 00000000..34ea8307 --- /dev/null +++ 
b/launch/api_client/test/test_models/test_content1.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.content1 import Content1 + + +class TestContent1(unittest.TestCase): + """Content1 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_content2.py b/launch/api_client/test/test_models/test_content2.py new file mode 100644 index 00000000..a17771b2 --- /dev/null +++ b/launch/api_client/test/test_models/test_content2.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.content2 import Content2 + + +class TestContent2(unittest.TestCase): + """Content2 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_content3.py b/launch/api_client/test/test_models/test_content3.py new file mode 100644 index 00000000..9c4e8ab6 --- /dev/null +++ b/launch/api_client/test/test_models/test_content3.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + 
+import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.content3 import Content3 + + +class TestContent3(unittest.TestCase): + """Content3 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_content4.py b/launch/api_client/test/test_models/test_content4.py new file mode 100644 index 00000000..8d4a9d8c --- /dev/null +++ b/launch/api_client/test/test_models/test_content4.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.content4 import Content4 + + +class TestContent4(unittest.TestCase): + """Content4 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_content8.py b/launch/api_client/test/test_models/test_content8.py new file mode 100644 index 00000000..cc2d68a8 --- /dev/null +++ b/launch/api_client/test/test_models/test_content8.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.content8 import Content8 + + +class TestContent8(unittest.TestCase): + """Content8 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff 
--git a/launch/api_client/test/test_models/test_create_async_task_v1_response.py b/launch/api_client/test/test_models/test_create_async_task_v1_response.py new file mode 100644 index 00000000..5df09f60 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_async_task_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_async_task_v1_response import ( + CreateAsyncTaskV1Response, +) + + +class TestCreateAsyncTaskV1Response(unittest.TestCase): + """CreateAsyncTaskV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_model_config.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_model_config.py new file mode 100644 index 00000000..66de0e9b --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_completions_v1_model_config.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_completions_v1_model_config import ( + CreateBatchCompletionsV1ModelConfig, +) + + +class TestCreateBatchCompletionsV1ModelConfig(unittest.TestCase): + """CreateBatchCompletionsV1ModelConfig unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == 
"__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_request.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_request.py new file mode 100644 index 00000000..4a0c35f8 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_completions_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_completions_v1_request import ( + CreateBatchCompletionsV1Request, +) + + +class TestCreateBatchCompletionsV1Request(unittest.TestCase): + """CreateBatchCompletionsV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_request_content.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_request_content.py new file mode 100644 index 00000000..d83e4ab9 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_completions_v1_request_content.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_completions_v1_request_content import ( + CreateBatchCompletionsV1RequestContent, +) + + +class TestCreateBatchCompletionsV1RequestContent(unittest.TestCase): + 
"""CreateBatchCompletionsV1RequestContent unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v1_response.py b/launch/api_client/test/test_models/test_create_batch_completions_v1_response.py new file mode 100644 index 00000000..3178d605 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_completions_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_completions_v1_response import ( + CreateBatchCompletionsV1Response, +) + + +class TestCreateBatchCompletionsV1Response(unittest.TestCase): + """CreateBatchCompletionsV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_completions_v2_request.py b/launch/api_client/test/test_models/test_create_batch_completions_v2_request.py new file mode 100644 index 00000000..0b3f52c7 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_completions_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_completions_v2_request import ( + CreateBatchCompletionsV2Request, +) + + +class 
TestCreateBatchCompletionsV2Request(unittest.TestCase): + """CreateBatchCompletionsV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_job_resource_requests.py b/launch/api_client/test/test_models/test_create_batch_job_resource_requests.py new file mode 100644 index 00000000..5899dc99 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_job_resource_requests.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_job_resource_requests import ( + CreateBatchJobResourceRequests, +) + + +class TestCreateBatchJobResourceRequests(unittest.TestCase): + """CreateBatchJobResourceRequests unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_job_v1_request.py b/launch/api_client/test/test_models/test_create_batch_job_v1_request.py new file mode 100644 index 00000000..66fa7437 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_job_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_job_v1_request import ( + CreateBatchJobV1Request, +) + + +class 
TestCreateBatchJobV1Request(unittest.TestCase): + """CreateBatchJobV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_batch_job_v1_response.py b/launch/api_client/test/test_models/test_create_batch_job_v1_response.py new file mode 100644 index 00000000..a468158b --- /dev/null +++ b/launch/api_client/test/test_models/test_create_batch_job_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_batch_job_v1_response import ( + CreateBatchJobV1Response, +) + + +class TestCreateBatchJobV1Response(unittest.TestCase): + """CreateBatchJobV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_chat_completion_response.py b/launch/api_client/test/test_models/test_create_chat_completion_response.py new file mode 100644 index 00000000..ba7985e2 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_chat_completion_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_chat_completion_response import ( + CreateChatCompletionResponse, +) + + +class 
TestCreateChatCompletionResponse(unittest.TestCase): + """CreateChatCompletionResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_chat_completion_stream_response.py b/launch/api_client/test/test_models/test_create_chat_completion_stream_response.py new file mode 100644 index 00000000..e5d414f5 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_chat_completion_stream_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_chat_completion_stream_response import ( + CreateChatCompletionStreamResponse, +) + + +class TestCreateChatCompletionStreamResponse(unittest.TestCase): + """CreateChatCompletionStreamResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_completion_response.py b/launch/api_client/test/test_models/test_create_completion_response.py new file mode 100644 index 00000000..30ff501d --- /dev/null +++ b/launch/api_client/test/test_models/test_create_completion_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_completion_response import ( + CreateCompletionResponse, 
+) + + +class TestCreateCompletionResponse(unittest.TestCase): + """CreateCompletionResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_deep_speed_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_deep_speed_model_endpoint_request.py new file mode 100644 index 00000000..358d0d15 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_deep_speed_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_deep_speed_model_endpoint_request import ( + CreateDeepSpeedModelEndpointRequest, +) + + +class TestCreateDeepSpeedModelEndpointRequest(unittest.TestCase): + """CreateDeepSpeedModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_request.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_request.py new file mode 100644 index 00000000..1cd370df --- /dev/null +++ b/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from 
launch.api_client.model.create_docker_image_batch_job_bundle_v1_request import ( + CreateDockerImageBatchJobBundleV1Request, +) + + +class TestCreateDockerImageBatchJobBundleV1Request(unittest.TestCase): + """CreateDockerImageBatchJobBundleV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_response.py new file mode 100644 index 00000000..64c05b93 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_docker_image_batch_job_bundle_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_docker_image_batch_job_bundle_v1_response import ( + CreateDockerImageBatchJobBundleV1Response, +) + + +class TestCreateDockerImageBatchJobBundleV1Response(unittest.TestCase): + """CreateDockerImageBatchJobBundleV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_resource_requests.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_resource_requests.py new file mode 100644 index 00000000..c8e7d475 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_docker_image_batch_job_resource_requests.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 
+ + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_docker_image_batch_job_resource_requests import ( + CreateDockerImageBatchJobResourceRequests, +) + + +class TestCreateDockerImageBatchJobResourceRequests(unittest.TestCase): + """CreateDockerImageBatchJobResourceRequests unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_request.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_request.py new file mode 100644 index 00000000..43a17ed4 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_docker_image_batch_job_v1_request import ( + CreateDockerImageBatchJobV1Request, +) + + +class TestCreateDockerImageBatchJobV1Request(unittest.TestCase): + """CreateDockerImageBatchJobV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_response.py b/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_response.py new file mode 100644 index 00000000..d00f1a5c --- /dev/null +++ b/launch/api_client/test/test_models/test_create_docker_image_batch_job_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + 
launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_docker_image_batch_job_v1_response import ( + CreateDockerImageBatchJobV1Response, +) + + +class TestCreateDockerImageBatchJobV1Response(unittest.TestCase): + """CreateDockerImageBatchJobV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_fine_tune_request.py b/launch/api_client/test/test_models/test_create_fine_tune_request.py new file mode 100644 index 00000000..cb5c9130 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_fine_tune_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_fine_tune_request import ( + CreateFineTuneRequest, +) + + +class TestCreateFineTuneRequest(unittest.TestCase): + """CreateFineTuneRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_fine_tune_response.py b/launch/api_client/test/test_models/test_create_fine_tune_response.py new file mode 100644 index 00000000..3c1976ca --- /dev/null +++ b/launch/api_client/test/test_models/test_create_fine_tune_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated 
by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_fine_tune_response import ( + CreateFineTuneResponse, +) + + +class TestCreateFineTuneResponse(unittest.TestCase): + """CreateFineTuneResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_light_llm_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_light_llm_model_endpoint_request.py new file mode 100644 index 00000000..de9f89ea --- /dev/null +++ b/launch/api_client/test/test_models/test_create_light_llm_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_light_llm_model_endpoint_request import ( + CreateLightLLMModelEndpointRequest, +) + + +class TestCreateLightLLMModelEndpointRequest(unittest.TestCase): + """CreateLightLLMModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_request.py new file mode 100644 index 00000000..5193285d --- /dev/null +++ b/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No 
description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_llm_model_endpoint_v1_request import ( + CreateLLMModelEndpointV1Request, +) + + +class TestCreateLLMModelEndpointV1Request(unittest.TestCase): + """CreateLLMModelEndpointV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_response.py new file mode 100644 index 00000000..bb13b794 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_llm_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_llm_model_endpoint_v1_response import ( + CreateLLMModelEndpointV1Response, +) + + +class TestCreateLLMModelEndpointV1Response(unittest.TestCase): + """CreateLLMModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v1_request.py b/launch/api_client/test/test_models/test_create_model_bundle_v1_request.py new file mode 100644 index 00000000..ac1b7509 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_model_bundle_v1_request.py @@ -0,0 +1,28 @@ +# 
coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_model_bundle_v1_request import ( + CreateModelBundleV1Request, +) + + +class TestCreateModelBundleV1Request(unittest.TestCase): + """CreateModelBundleV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v1_response.py b/launch/api_client/test/test_models/test_create_model_bundle_v1_response.py new file mode 100644 index 00000000..3cba4c7c --- /dev/null +++ b/launch/api_client/test/test_models/test_create_model_bundle_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_model_bundle_v1_response import ( + CreateModelBundleV1Response, +) + + +class TestCreateModelBundleV1Response(unittest.TestCase): + """CreateModelBundleV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v2_request.py b/launch/api_client/test/test_models/test_create_model_bundle_v2_request.py new file mode 100644 index 00000000..8ba28a24 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_model_bundle_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + 
launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_model_bundle_v2_request import ( + CreateModelBundleV2Request, +) + + +class TestCreateModelBundleV2Request(unittest.TestCase): + """CreateModelBundleV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_bundle_v2_response.py b/launch/api_client/test/test_models/test_create_model_bundle_v2_response.py new file mode 100644 index 00000000..b14593f7 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_model_bundle_v2_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_model_bundle_v2_response import ( + CreateModelBundleV2Response, +) + + +class TestCreateModelBundleV2Response(unittest.TestCase): + """CreateModelBundleV2Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_create_model_endpoint_v1_request.py new file mode 100644 index 00000000..3504cc52 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_model_endpoint_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No 
description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_model_endpoint_v1_request import ( + CreateModelEndpointV1Request, +) + + +class TestCreateModelEndpointV1Request(unittest.TestCase): + """CreateModelEndpointV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_create_model_endpoint_v1_response.py new file mode 100644 index 00000000..b15ebe7c --- /dev/null +++ b/launch/api_client/test/test_models/test_create_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_model_endpoint_v1_response import ( + CreateModelEndpointV1Response, +) + + +class TestCreateModelEndpointV1Response(unittest.TestCase): + """CreateModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_sg_lang_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_sg_lang_model_endpoint_request.py new file mode 100644 index 00000000..3dbc62c6 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_sg_lang_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + 
+""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_sg_lang_model_endpoint_request import ( + CreateSGLangModelEndpointRequest, +) + + +class TestCreateSGLangModelEndpointRequest(unittest.TestCase): + """CreateSGLangModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_tensor_rtllm_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_tensor_rtllm_model_endpoint_request.py new file mode 100644 index 00000000..f323417e --- /dev/null +++ b/launch/api_client/test/test_models/test_create_tensor_rtllm_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_tensor_rtllm_model_endpoint_request import ( + CreateTensorRTLLMModelEndpointRequest, +) + + +class TestCreateTensorRTLLMModelEndpointRequest(unittest.TestCase): + """CreateTensorRTLLMModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_text_generation_inference_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_text_generation_inference_model_endpoint_request.py new file mode 100644 index 00000000..2cce6e45 --- 
/dev/null +++ b/launch/api_client/test/test_models/test_create_text_generation_inference_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_text_generation_inference_model_endpoint_request import ( + CreateTextGenerationInferenceModelEndpointRequest, +) + + +class TestCreateTextGenerationInferenceModelEndpointRequest(unittest.TestCase): + """CreateTextGenerationInferenceModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_trigger_v1_request.py b/launch/api_client/test/test_models/test_create_trigger_v1_request.py new file mode 100644 index 00000000..b4022ac5 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_trigger_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_trigger_v1_request import ( + CreateTriggerV1Request, +) + + +class TestCreateTriggerV1Request(unittest.TestCase): + """CreateTriggerV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_trigger_v1_response.py b/launch/api_client/test/test_models/test_create_trigger_v1_response.py 
new file mode 100644 index 00000000..ceef3d29 --- /dev/null +++ b/launch/api_client/test/test_models/test_create_trigger_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_trigger_v1_response import ( + CreateTriggerV1Response, +) + + +class TestCreateTriggerV1Response(unittest.TestCase): + """CreateTriggerV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_create_vllm_model_endpoint_request.py b/launch/api_client/test/test_models/test_create_vllm_model_endpoint_request.py new file mode 100644 index 00000000..d1df7abe --- /dev/null +++ b/launch/api_client/test/test_models/test_create_vllm_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.create_vllm_model_endpoint_request import ( + CreateVLLMModelEndpointRequest, +) + + +class TestCreateVLLMModelEndpointRequest(unittest.TestCase): + """CreateVLLMModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_custom_framework.py b/launch/api_client/test/test_models/test_custom_framework.py new file mode 100644 index 00000000..0bfbff09 
--- /dev/null +++ b/launch/api_client/test/test_models/test_custom_framework.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.custom_framework import CustomFramework + + +class TestCustomFramework(unittest.TestCase): + """CustomFramework unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_file_response.py b/launch/api_client/test/test_models/test_delete_file_response.py new file mode 100644 index 00000000..224e9226 --- /dev/null +++ b/launch/api_client/test/test_models/test_delete_file_response.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.delete_file_response import DeleteFileResponse + + +class TestDeleteFileResponse(unittest.TestCase): + """DeleteFileResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_llm_endpoint_response.py b/launch/api_client/test/test_models/test_delete_llm_endpoint_response.py new file mode 100644 index 00000000..28cfe878 --- /dev/null +++ b/launch/api_client/test/test_models/test_delete_llm_endpoint_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.delete_llm_endpoint_response import ( + DeleteLLMEndpointResponse, +) + + +class TestDeleteLLMEndpointResponse(unittest.TestCase): + """DeleteLLMEndpointResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_delete_model_endpoint_v1_response.py new file mode 100644 index 00000000..88e0c91c --- /dev/null +++ b/launch/api_client/test/test_models/test_delete_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.delete_model_endpoint_v1_response import ( + DeleteModelEndpointV1Response, +) + + +class TestDeleteModelEndpointV1Response(unittest.TestCase): + """DeleteModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_delete_trigger_v1_response.py b/launch/api_client/test/test_models/test_delete_trigger_v1_response.py new file mode 100644 index 00000000..85e48e44 --- /dev/null +++ b/launch/api_client/test/test_models/test_delete_trigger_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.delete_trigger_v1_response import ( + DeleteTriggerV1Response, +) + + +class TestDeleteTriggerV1Response(unittest.TestCase): + """DeleteTriggerV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_docker_image_batch_job.py b/launch/api_client/test/test_models/test_docker_image_batch_job.py new file mode 100644 index 00000000..4b92fcec --- /dev/null +++ b/launch/api_client/test/test_models/test_docker_image_batch_job.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.docker_image_batch_job import DockerImageBatchJob + + +class TestDockerImageBatchJob(unittest.TestCase): + """DockerImageBatchJob unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/test/test_models/test_docker_image_batch_job_bundle_v1_response.py new file mode 100644 index 00000000..c5681e07 --- /dev/null +++ b/launch/api_client/test/test_models/test_docker_image_batch_job_bundle_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: 
E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.docker_image_batch_job_bundle_v1_response import ( + DockerImageBatchJobBundleV1Response, +) + + +class TestDockerImageBatchJobBundleV1Response(unittest.TestCase): + """DockerImageBatchJobBundleV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_endpoint_predict_v1_request.py b/launch/api_client/test/test_models/test_endpoint_predict_v1_request.py new file mode 100644 index 00000000..a2380e7f --- /dev/null +++ b/launch/api_client/test/test_models/test_endpoint_predict_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.endpoint_predict_v1_request import ( + EndpointPredictV1Request, +) + + +class TestEndpointPredictV1Request(unittest.TestCase): + """EndpointPredictV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_file.py b/launch/api_client/test/test_models/test_file.py new file mode 100644 index 00000000..75f7173b --- /dev/null +++ b/launch/api_client/test/test_models/test_file.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: 
https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.file import File + + +class TestFile(unittest.TestCase): + """File unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_filtered_chat_completion_v2_request.py b/launch/api_client/test/test_models/test_filtered_chat_completion_v2_request.py new file mode 100644 index 00000000..887889e8 --- /dev/null +++ b/launch/api_client/test/test_models/test_filtered_chat_completion_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.filtered_chat_completion_v2_request import ( + FilteredChatCompletionV2Request, +) + + +class TestFilteredChatCompletionV2Request(unittest.TestCase): + """FilteredChatCompletionV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_filtered_completion_v2_request.py b/launch/api_client/test/test_models/test_filtered_completion_v2_request.py new file mode 100644 index 00000000..993945b7 --- /dev/null +++ b/launch/api_client/test/test_models/test_filtered_completion_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from 
launch.api_client import configuration +from launch.api_client.model.filtered_completion_v2_request import ( + FilteredCompletionV2Request, +) + + +class TestFilteredCompletionV2Request(unittest.TestCase): + """FilteredCompletionV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function1.py b/launch/api_client/test/test_models/test_function1.py new file mode 100644 index 00000000..3be24663 --- /dev/null +++ b/launch/api_client/test/test_models/test_function1.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function1 import Function1 + + +class TestFunction1(unittest.TestCase): + """Function1 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function2.py b/launch/api_client/test/test_models/test_function2.py new file mode 100644 index 00000000..9ef3b4b3 --- /dev/null +++ b/launch/api_client/test/test_models/test_function2.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function2 import Function2 + + +class TestFunction2(unittest.TestCase): + """Function2 unit test stubs""" + + _configuration = configuration.Configuration() + + 
+if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function3.py b/launch/api_client/test/test_models/test_function3.py new file mode 100644 index 00000000..14bf6d5e --- /dev/null +++ b/launch/api_client/test/test_models/test_function3.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function3 import Function3 + + +class TestFunction3(unittest.TestCase): + """Function3 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function_call.py b/launch/api_client/test/test_models/test_function_call.py new file mode 100644 index 00000000..2e3ed603 --- /dev/null +++ b/launch/api_client/test/test_models/test_function_call.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function_call import FunctionCall + + +class TestFunctionCall(unittest.TestCase): + """FunctionCall unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function_call2.py b/launch/api_client/test/test_models/test_function_call2.py new file mode 100644 index 00000000..6477b506 --- /dev/null +++ 
b/launch/api_client/test/test_models/test_function_call2.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function_call2 import FunctionCall2 + + +class TestFunctionCall2(unittest.TestCase): + """FunctionCall2 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function_object.py b/launch/api_client/test/test_models/test_function_object.py new file mode 100644 index 00000000..6debac41 --- /dev/null +++ b/launch/api_client/test/test_models/test_function_object.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function_object import FunctionObject + + +class TestFunctionObject(unittest.TestCase): + """FunctionObject unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_function_parameters.py b/launch/api_client/test/test_models/test_function_parameters.py new file mode 100644 index 00000000..a58e1c57 --- /dev/null +++ b/launch/api_client/test/test_models/test_function_parameters.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: 
E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.function_parameters import FunctionParameters + + +class TestFunctionParameters(unittest.TestCase): + """FunctionParameters unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_async_task_v1_response.py b/launch/api_client/test/test_models/test_get_async_task_v1_response.py new file mode 100644 index 00000000..969c9f1e --- /dev/null +++ b/launch/api_client/test/test_models/test_get_async_task_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_async_task_v1_response import ( + GetAsyncTaskV1Response, +) + + +class TestGetAsyncTaskV1Response(unittest.TestCase): + """GetAsyncTaskV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_batch_completion_v2_response.py b/launch/api_client/test/test_models/test_get_batch_completion_v2_response.py new file mode 100644 index 00000000..e80b0c5c --- /dev/null +++ b/launch/api_client/test/test_models/test_get_batch_completion_v2_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: 
https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_batch_completion_v2_response import ( + GetBatchCompletionV2Response, +) + + +class TestGetBatchCompletionV2Response(unittest.TestCase): + """GetBatchCompletionV2Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_batch_job_v1_response.py b/launch/api_client/test/test_models/test_get_batch_job_v1_response.py new file mode 100644 index 00000000..8d9d2d89 --- /dev/null +++ b/launch/api_client/test/test_models/test_get_batch_job_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_batch_job_v1_response import ( + GetBatchJobV1Response, +) + + +class TestGetBatchJobV1Response(unittest.TestCase): + """GetBatchJobV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_docker_image_batch_job_v1_response.py b/launch/api_client/test/test_models/test_get_docker_image_batch_job_v1_response.py new file mode 100644 index 00000000..c55ec80a --- /dev/null +++ b/launch/api_client/test/test_models/test_get_docker_image_batch_job_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech 
+""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_docker_image_batch_job_v1_response import ( + GetDockerImageBatchJobV1Response, +) + + +class TestGetDockerImageBatchJobV1Response(unittest.TestCase): + """GetDockerImageBatchJobV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_file_content_response.py b/launch/api_client/test/test_models/test_get_file_content_response.py new file mode 100644 index 00000000..bc938a47 --- /dev/null +++ b/launch/api_client/test/test_models/test_get_file_content_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_file_content_response import ( + GetFileContentResponse, +) + + +class TestGetFileContentResponse(unittest.TestCase): + """GetFileContentResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_file_response.py b/launch/api_client/test/test_models/test_get_file_response.py new file mode 100644 index 00000000..cd55395b --- /dev/null +++ b/launch/api_client/test/test_models/test_get_file_response.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from 
launch.api_client import configuration +from launch.api_client.model.get_file_response import GetFileResponse + + +class TestGetFileResponse(unittest.TestCase): + """GetFileResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_fine_tune_events_response.py b/launch/api_client/test/test_models/test_get_fine_tune_events_response.py new file mode 100644 index 00000000..67e40775 --- /dev/null +++ b/launch/api_client/test/test_models/test_get_fine_tune_events_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_fine_tune_events_response import ( + GetFineTuneEventsResponse, +) + + +class TestGetFineTuneEventsResponse(unittest.TestCase): + """GetFineTuneEventsResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_fine_tune_response.py b/launch/api_client/test/test_models/test_get_fine_tune_response.py new file mode 100644 index 00000000..42eedc90 --- /dev/null +++ b/launch/api_client/test/test_models/test_get_fine_tune_response.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_fine_tune_response import 
GetFineTuneResponse + + +class TestGetFineTuneResponse(unittest.TestCase): + """GetFineTuneResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_llm_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_get_llm_model_endpoint_v1_response.py new file mode 100644 index 00000000..7214f77a --- /dev/null +++ b/launch/api_client/test/test_models/test_get_llm_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_llm_model_endpoint_v1_response import ( + GetLLMModelEndpointV1Response, +) + + +class TestGetLLMModelEndpointV1Response(unittest.TestCase): + """GetLLMModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_get_model_endpoint_v1_response.py new file mode 100644 index 00000000..e70ce97d --- /dev/null +++ b/launch/api_client/test/test_models/test_get_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_model_endpoint_v1_response import ( + GetModelEndpointV1Response, 
+) + + +class TestGetModelEndpointV1Response(unittest.TestCase): + """GetModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_get_trigger_v1_response.py b/launch/api_client/test/test_models/test_get_trigger_v1_response.py new file mode 100644 index 00000000..df0ec899 --- /dev/null +++ b/launch/api_client/test/test_models/test_get_trigger_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.get_trigger_v1_response import ( + GetTriggerV1Response, +) + + +class TestGetTriggerV1Response(unittest.TestCase): + """GetTriggerV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_gpu_type.py b/launch/api_client/test/test_models/test_gpu_type.py new file mode 100644 index 00000000..dd3395b6 --- /dev/null +++ b/launch/api_client/test/test_models/test_gpu_type.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.gpu_type import GpuType + + +class TestGpuType(unittest.TestCase): + """GpuType unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git 
a/launch/api_client/test/test_models/test_http_validation_error.py b/launch/api_client/test/test_models/test_http_validation_error.py new file mode 100644 index 00000000..3a40588f --- /dev/null +++ b/launch/api_client/test/test_models/test_http_validation_error.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.http_validation_error import HTTPValidationError + + +class TestHTTPValidationError(unittest.TestCase): + """HTTPValidationError unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_image_url.py b/launch/api_client/test/test_models/test_image_url.py new file mode 100644 index 00000000..2f64d9ac --- /dev/null +++ b/launch/api_client/test/test_models/test_image_url.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.image_url import ImageUrl + + +class TestImageUrl(unittest.TestCase): + """ImageUrl unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_input_audio.py b/launch/api_client/test/test_models/test_input_audio.py new file mode 100644 index 00000000..10aae18b --- /dev/null +++ b/launch/api_client/test/test_models/test_input_audio.py @@ 
-0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.input_audio import InputAudio + + +class TestInputAudio(unittest.TestCase): + """InputAudio unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_json_schema.py b/launch/api_client/test/test_models/test_json_schema.py new file mode 100644 index 00000000..17e55216 --- /dev/null +++ b/launch/api_client/test/test_models/test_json_schema.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.json_schema import JsonSchema + + +class TestJsonSchema(unittest.TestCase): + """JsonSchema unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_docker_image_batch_job_bundle_v1_response.py b/launch/api_client/test/test_models/test_list_docker_image_batch_job_bundle_v1_response.py new file mode 100644 index 00000000..545b2e92 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_docker_image_batch_job_bundle_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version 
of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_docker_image_batch_job_bundle_v1_response import ( + ListDockerImageBatchJobBundleV1Response, +) + + +class TestListDockerImageBatchJobBundleV1Response(unittest.TestCase): + """ListDockerImageBatchJobBundleV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_docker_image_batch_jobs_v1_response.py b/launch/api_client/test/test_models/test_list_docker_image_batch_jobs_v1_response.py new file mode 100644 index 00000000..d34e65e9 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_docker_image_batch_jobs_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_docker_image_batch_jobs_v1_response import ( + ListDockerImageBatchJobsV1Response, +) + + +class TestListDockerImageBatchJobsV1Response(unittest.TestCase): + """ListDockerImageBatchJobsV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_files_response.py b/launch/api_client/test/test_models/test_list_files_response.py new file mode 100644 index 00000000..596f5970 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_files_response.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_files_response import ListFilesResponse + + +class TestListFilesResponse(unittest.TestCase): + """ListFilesResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_fine_tunes_response.py b/launch/api_client/test/test_models/test_list_fine_tunes_response.py new file mode 100644 index 00000000..8c46f410 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_fine_tunes_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_fine_tunes_response import ( + ListFineTunesResponse, +) + + +class TestListFineTunesResponse(unittest.TestCase): + """ListFineTunesResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_llm_model_endpoints_v1_response.py b/launch/api_client/test/test_models/test_list_llm_model_endpoints_v1_response.py new file mode 100644 index 00000000..7abfa977 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_llm_model_endpoints_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI 
document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_llm_model_endpoints_v1_response import ( + ListLLMModelEndpointsV1Response, +) + + +class TestListLLMModelEndpointsV1Response(unittest.TestCase): + """ListLLMModelEndpointsV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_model_bundles_v1_response.py b/launch/api_client/test/test_models/test_list_model_bundles_v1_response.py new file mode 100644 index 00000000..7dd7d091 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_model_bundles_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_model_bundles_v1_response import ( + ListModelBundlesV1Response, +) + + +class TestListModelBundlesV1Response(unittest.TestCase): + """ListModelBundlesV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_model_bundles_v2_response.py b/launch/api_client/test/test_models/test_list_model_bundles_v2_response.py new file mode 100644 index 00000000..1c93723f --- /dev/null +++ b/launch/api_client/test/test_models/test_list_model_bundles_v2_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI 
document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_model_bundles_v2_response import ( + ListModelBundlesV2Response, +) + + +class TestListModelBundlesV2Response(unittest.TestCase): + """ListModelBundlesV2Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_model_endpoints_v1_response.py b/launch/api_client/test/test_models/test_list_model_endpoints_v1_response.py new file mode 100644 index 00000000..fd8538a0 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_model_endpoints_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_model_endpoints_v1_response import ( + ListModelEndpointsV1Response, +) + + +class TestListModelEndpointsV1Response(unittest.TestCase): + """ListModelEndpointsV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_list_triggers_v1_response.py b/launch/api_client/test/test_models/test_list_triggers_v1_response.py new file mode 100644 index 00000000..9e41cda6 --- /dev/null +++ b/launch/api_client/test/test_models/test_list_triggers_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated 
by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.list_triggers_v1_response import ( + ListTriggersV1Response, +) + + +class TestListTriggersV1Response(unittest.TestCase): + """ListTriggersV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_llm_fine_tune_event.py b/launch/api_client/test/test_models/test_llm_fine_tune_event.py new file mode 100644 index 00000000..15faec4c --- /dev/null +++ b/launch/api_client/test/test_models/test_llm_fine_tune_event.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.llm_fine_tune_event import LLMFineTuneEvent + + +class TestLLMFineTuneEvent(unittest.TestCase): + """LLMFineTuneEvent unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_llm_inference_framework.py b/launch/api_client/test/test_models/test_llm_inference_framework.py new file mode 100644 index 00000000..b5866821 --- /dev/null +++ b/launch/api_client/test/test_models/test_llm_inference_framework.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from 
launch.api_client.model.llm_inference_framework import ( + LLMInferenceFramework, +) + + +class TestLLMInferenceFramework(unittest.TestCase): + """LLMInferenceFramework unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_llm_source.py b/launch/api_client/test/test_models/test_llm_source.py new file mode 100644 index 00000000..c581a450 --- /dev/null +++ b/launch/api_client/test/test_models/test_llm_source.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.llm_source import LLMSource + + +class TestLLMSource(unittest.TestCase): + """LLMSource unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_logprobs.py b/launch/api_client/test/test_models/test_logprobs.py new file mode 100644 index 00000000..cec6bb02 --- /dev/null +++ b/launch/api_client/test/test_models/test_logprobs.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.logprobs import Logprobs + + +class TestLogprobs(unittest.TestCase): + """Logprobs unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git 
a/launch/api_client/test/test_models/test_logprobs2.py b/launch/api_client/test/test_models/test_logprobs2.py new file mode 100644 index 00000000..3dd1c171 --- /dev/null +++ b/launch/api_client/test/test_models/test_logprobs2.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.logprobs2 import Logprobs2 + + +class TestLogprobs2(unittest.TestCase): + """Logprobs2 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_metadata.py b/launch/api_client/test/test_models/test_metadata.py new file mode 100644 index 00000000..3b8633e6 --- /dev/null +++ b/launch/api_client/test/test_models/test_metadata.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.metadata import Metadata + + +class TestMetadata(unittest.TestCase): + """Metadata unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_environment_params.py b/launch/api_client/test/test_models/test_model_bundle_environment_params.py new file mode 100644 index 00000000..1b1296d4 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_bundle_environment_params.py @@ -0,0 +1,28 @@ +# coding: 
utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_bundle_environment_params import ( + ModelBundleEnvironmentParams, +) + + +class TestModelBundleEnvironmentParams(unittest.TestCase): + """ModelBundleEnvironmentParams unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_framework_type.py b/launch/api_client/test/test_models/test_model_bundle_framework_type.py new file mode 100644 index 00000000..88199a73 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_bundle_framework_type.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_bundle_framework_type import ( + ModelBundleFrameworkType, +) + + +class TestModelBundleFrameworkType(unittest.TestCase): + """ModelBundleFrameworkType unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_order_by.py b/launch/api_client/test/test_models/test_model_bundle_order_by.py new file mode 100644 index 00000000..b1ab76fb --- /dev/null +++ b/launch/api_client/test/test_models/test_model_bundle_order_by.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_bundle_order_by import ModelBundleOrderBy + + +class TestModelBundleOrderBy(unittest.TestCase): + """ModelBundleOrderBy unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_packaging_type.py b/launch/api_client/test/test_models/test_model_bundle_packaging_type.py new file mode 100644 index 00000000..bf85bde7 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_bundle_packaging_type.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_bundle_packaging_type import ( + ModelBundlePackagingType, +) + + +class TestModelBundlePackagingType(unittest.TestCase): + """ModelBundlePackagingType unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_v1_response.py b/launch/api_client/test/test_models/test_model_bundle_v1_response.py new file mode 100644 index 00000000..ad0c1f16 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_bundle_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the 
OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_bundle_v1_response import ( + ModelBundleV1Response, +) + + +class TestModelBundleV1Response(unittest.TestCase): + """ModelBundleV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_bundle_v2_response.py b/launch/api_client/test/test_models/test_model_bundle_v2_response.py new file mode 100644 index 00000000..74e704b3 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_bundle_v2_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_bundle_v2_response import ( + ModelBundleV2Response, +) + + +class TestModelBundleV2Response(unittest.TestCase): + """ModelBundleV2Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_download_request.py b/launch/api_client/test/test_models/test_model_download_request.py new file mode 100644 index 00000000..2fe09f35 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_download_request.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import 
launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_download_request import ModelDownloadRequest + + +class TestModelDownloadRequest(unittest.TestCase): + """ModelDownloadRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_download_response.py b/launch/api_client/test/test_models/test_model_download_response.py new file mode 100644 index 00000000..d6a0285d --- /dev/null +++ b/launch/api_client/test/test_models/test_model_download_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_download_response import ( + ModelDownloadResponse, +) + + +class TestModelDownloadResponse(unittest.TestCase): + """ModelDownloadResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_deployment_state.py b/launch/api_client/test/test_models/test_model_endpoint_deployment_state.py new file mode 100644 index 00000000..6824b916 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_endpoint_deployment_state.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from 
launch.api_client.model.model_endpoint_deployment_state import ( + ModelEndpointDeploymentState, +) + + +class TestModelEndpointDeploymentState(unittest.TestCase): + """ModelEndpointDeploymentState unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_order_by.py b/launch/api_client/test/test_models/test_model_endpoint_order_by.py new file mode 100644 index 00000000..c2a28455 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_endpoint_order_by.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_endpoint_order_by import ( + ModelEndpointOrderBy, +) + + +class TestModelEndpointOrderBy(unittest.TestCase): + """ModelEndpointOrderBy unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_resource_state.py b/launch/api_client/test/test_models/test_model_endpoint_resource_state.py new file mode 100644 index 00000000..6d3ee8ad --- /dev/null +++ b/launch/api_client/test/test_models/test_model_endpoint_resource_state.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_endpoint_resource_state import ( + 
ModelEndpointResourceState, +) + + +class TestModelEndpointResourceState(unittest.TestCase): + """ModelEndpointResourceState unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_status.py b/launch/api_client/test/test_models/test_model_endpoint_status.py new file mode 100644 index 00000000..1e05870d --- /dev/null +++ b/launch/api_client/test/test_models/test_model_endpoint_status.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_endpoint_status import ModelEndpointStatus + + +class TestModelEndpointStatus(unittest.TestCase): + """ModelEndpointStatus unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_model_endpoint_type.py b/launch/api_client/test/test_models/test_model_endpoint_type.py new file mode 100644 index 00000000..b7d64522 --- /dev/null +++ b/launch/api_client/test/test_models/test_model_endpoint_type.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.model_endpoint_type import ModelEndpointType + + +class TestModelEndpointType(unittest.TestCase): + """ModelEndpointType unit test stubs""" + + _configuration = 
configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_parallel_tool_calls.py b/launch/api_client/test/test_models/test_parallel_tool_calls.py new file mode 100644 index 00000000..2a41d29f --- /dev/null +++ b/launch/api_client/test/test_models/test_parallel_tool_calls.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.parallel_tool_calls import ParallelToolCalls + + +class TestParallelToolCalls(unittest.TestCase): + """ParallelToolCalls unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_prediction_content.py b/launch/api_client/test/test_models/test_prediction_content.py new file mode 100644 index 00000000..34a928b7 --- /dev/null +++ b/launch/api_client/test/test_models/test_prediction_content.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.prediction_content import PredictionContent + + +class TestPredictionContent(unittest.TestCase): + """PredictionContent unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt.py 
b/launch/api_client/test/test_models/test_prompt.py new file mode 100644 index 00000000..f75bacfc --- /dev/null +++ b/launch/api_client/test/test_models/test_prompt.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.prompt import Prompt + + +class TestPrompt(unittest.TestCase): + """Prompt unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt1.py b/launch/api_client/test/test_models/test_prompt1.py new file mode 100644 index 00000000..6d78b41d --- /dev/null +++ b/launch/api_client/test/test_models/test_prompt1.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.prompt1 import Prompt1 + + +class TestPrompt1(unittest.TestCase): + """Prompt1 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt1_item.py b/launch/api_client/test/test_models/test_prompt1_item.py new file mode 100644 index 00000000..23e9c769 --- /dev/null +++ b/launch/api_client/test/test_models/test_prompt1_item.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # 
noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.prompt1_item import Prompt1Item + + +class TestPrompt1Item(unittest.TestCase): + """Prompt1Item unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_prompt_tokens_details.py b/launch/api_client/test/test_models/test_prompt_tokens_details.py new file mode 100644 index 00000000..6f98298b --- /dev/null +++ b/launch/api_client/test/test_models/test_prompt_tokens_details.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.prompt_tokens_details import PromptTokensDetails + + +class TestPromptTokensDetails(unittest.TestCase): + """PromptTokensDetails unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_pytorch_framework.py b/launch/api_client/test/test_models/test_pytorch_framework.py new file mode 100644 index 00000000..04c07858 --- /dev/null +++ b/launch/api_client/test/test_models/test_pytorch_framework.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration 
+from launch.api_client.model.pytorch_framework import PytorchFramework + + +class TestPytorchFramework(unittest.TestCase): + """PytorchFramework unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_quantization.py b/launch/api_client/test/test_models/test_quantization.py new file mode 100644 index 00000000..29b080e2 --- /dev/null +++ b/launch/api_client/test/test_models/test_quantization.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.quantization import Quantization + + +class TestQuantization(unittest.TestCase): + """Quantization unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_reasoning_effort.py b/launch/api_client/test/test_models/test_reasoning_effort.py new file mode 100644 index 00000000..7fe410e0 --- /dev/null +++ b/launch/api_client/test/test_models/test_reasoning_effort.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.reasoning_effort import ReasoningEffort + + +class TestReasoningEffort(unittest.TestCase): + """ReasoningEffort unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": 
+ unittest.main() diff --git a/launch/api_client/test/test_models/test_request_schema.py b/launch/api_client/test/test_models/test_request_schema.py new file mode 100644 index 00000000..9dc3c17a --- /dev/null +++ b/launch/api_client/test/test_models/test_request_schema.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.request_schema import RequestSchema + + +class TestRequestSchema(unittest.TestCase): + """RequestSchema unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_json_object.py b/launch/api_client/test/test_models/test_response_format_json_object.py new file mode 100644 index 00000000..23740fb2 --- /dev/null +++ b/launch/api_client/test/test_models/test_response_format_json_object.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.response_format_json_object import ( + ResponseFormatJsonObject, +) + + +class TestResponseFormatJsonObject(unittest.TestCase): + """ResponseFormatJsonObject unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_json_schema.py 
b/launch/api_client/test/test_models/test_response_format_json_schema.py new file mode 100644 index 00000000..227b1dbd --- /dev/null +++ b/launch/api_client/test/test_models/test_response_format_json_schema.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.response_format_json_schema import ( + ResponseFormatJsonSchema, +) + + +class TestResponseFormatJsonSchema(unittest.TestCase): + """ResponseFormatJsonSchema unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_json_schema_schema.py b/launch/api_client/test/test_models/test_response_format_json_schema_schema.py new file mode 100644 index 00000000..17d3d1c9 --- /dev/null +++ b/launch/api_client/test/test_models/test_response_format_json_schema_schema.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.response_format_json_schema_schema import ( + ResponseFormatJsonSchemaSchema, +) + + +class TestResponseFormatJsonSchemaSchema(unittest.TestCase): + """ResponseFormatJsonSchemaSchema unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_response_format_text.py 
b/launch/api_client/test/test_models/test_response_format_text.py new file mode 100644 index 00000000..e864dd69 --- /dev/null +++ b/launch/api_client/test/test_models/test_response_format_text.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.response_format_text import ResponseFormatText + + +class TestResponseFormatText(unittest.TestCase): + """ResponseFormatText unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_response_modalities.py b/launch/api_client/test/test_models/test_response_modalities.py new file mode 100644 index 00000000..5a279ccd --- /dev/null +++ b/launch/api_client/test/test_models/test_response_modalities.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.response_modalities import ResponseModalities + + +class TestResponseModalities(unittest.TestCase): + """ResponseModalities unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_response_schema.py b/launch/api_client/test/test_models/test_response_schema.py new file mode 100644 index 00000000..dcb01845 --- /dev/null +++ 
b/launch/api_client/test/test_models/test_response_schema.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.response_schema import ResponseSchema + + +class TestResponseSchema(unittest.TestCase): + """ResponseSchema unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_restart_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_restart_model_endpoint_v1_response.py new file mode 100644 index 00000000..0ca0631c --- /dev/null +++ b/launch/api_client/test/test_models/test_restart_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.restart_model_endpoint_v1_response import ( + RestartModelEndpointV1Response, +) + + +class TestRestartModelEndpointV1Response(unittest.TestCase): + """RestartModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_runnable_image_flavor.py b/launch/api_client/test/test_models/test_runnable_image_flavor.py new file mode 100644 index 00000000..e6dd8db1 --- /dev/null +++ b/launch/api_client/test/test_models/test_runnable_image_flavor.py @@ -0,0 +1,26 @@ +# 
coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.runnable_image_flavor import RunnableImageFlavor + + +class TestRunnableImageFlavor(unittest.TestCase): + """RunnableImageFlavor unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_service_tier.py b/launch/api_client/test/test_models/test_service_tier.py new file mode 100644 index 00000000..30ce36be --- /dev/null +++ b/launch/api_client/test/test_models/test_service_tier.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.service_tier import ServiceTier + + +class TestServiceTier(unittest.TestCase): + """ServiceTier unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_stop_configuration.py b/launch/api_client/test/test_models/test_stop_configuration.py new file mode 100644 index 00000000..416271d3 --- /dev/null +++ b/launch/api_client/test/test_models/test_stop_configuration.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: 
https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.stop_configuration import StopConfiguration + + +class TestStopConfiguration(unittest.TestCase): + """StopConfiguration unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_stop_configuration1.py b/launch/api_client/test/test_models/test_stop_configuration1.py new file mode 100644 index 00000000..a8fe69ec --- /dev/null +++ b/launch/api_client/test/test_models/test_stop_configuration1.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.stop_configuration1 import StopConfiguration1 + + +class TestStopConfiguration1(unittest.TestCase): + """StopConfiguration1 unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_stream_error.py b/launch/api_client/test/test_models/test_stream_error.py new file mode 100644 index 00000000..259edbc3 --- /dev/null +++ b/launch/api_client/test/test_models/test_stream_error.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.stream_error import StreamError + + +class 
TestStreamError(unittest.TestCase): + """StreamError unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_stream_error_content.py b/launch/api_client/test/test_models/test_stream_error_content.py new file mode 100644 index 00000000..1754e3dc --- /dev/null +++ b/launch/api_client/test/test_models/test_stream_error_content.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.stream_error_content import StreamErrorContent + + +class TestStreamErrorContent(unittest.TestCase): + """StreamErrorContent unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_streaming_enhanced_runnable_image_flavor.py b/launch/api_client/test/test_models/test_streaming_enhanced_runnable_image_flavor.py new file mode 100644 index 00000000..9382521e --- /dev/null +++ b/launch/api_client/test/test_models/test_streaming_enhanced_runnable_image_flavor.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.streaming_enhanced_runnable_image_flavor import ( + StreamingEnhancedRunnableImageFlavor, +) + + +class TestStreamingEnhancedRunnableImageFlavor(unittest.TestCase): + 
"""StreamingEnhancedRunnableImageFlavor unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_request.py b/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_request.py new file mode 100644 index 00000000..453920ec --- /dev/null +++ b/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.sync_endpoint_predict_v1_request import ( + SyncEndpointPredictV1Request, +) + + +class TestSyncEndpointPredictV1Request(unittest.TestCase): + """SyncEndpointPredictV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_response.py b/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_response.py new file mode 100644 index 00000000..761dbaf6 --- /dev/null +++ b/launch/api_client/test/test_models/test_sync_endpoint_predict_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.sync_endpoint_predict_v1_response import ( + SyncEndpointPredictV1Response, +) + + +class 
TestSyncEndpointPredictV1Response(unittest.TestCase): + """SyncEndpointPredictV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_task_status.py b/launch/api_client/test/test_models/test_task_status.py new file mode 100644 index 00000000..b028e01b --- /dev/null +++ b/launch/api_client/test/test_models/test_task_status.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.task_status import TaskStatus + + +class TestTaskStatus(unittest.TestCase): + """TaskStatus unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_tensorflow_framework.py b/launch/api_client/test/test_models/test_tensorflow_framework.py new file mode 100644 index 00000000..e0985cc6 --- /dev/null +++ b/launch/api_client/test/test_models/test_tensorflow_framework.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.tensorflow_framework import TensorflowFramework + + +class TestTensorflowFramework(unittest.TestCase): + """TensorflowFramework unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git 
a/launch/api_client/test/test_models/test_token_output.py b/launch/api_client/test/test_models/test_token_output.py new file mode 100644 index 00000000..ad1d67af --- /dev/null +++ b/launch/api_client/test/test_models/test_token_output.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.token_output import TokenOutput + + +class TestTokenOutput(unittest.TestCase): + """TokenOutput unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_tool_config.py b/launch/api_client/test/test_models/test_tool_config.py new file mode 100644 index 00000000..5502838c --- /dev/null +++ b/launch/api_client/test/test_models/test_tool_config.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.tool_config import ToolConfig + + +class TestToolConfig(unittest.TestCase): + """ToolConfig unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_top_logprob.py b/launch/api_client/test/test_models/test_top_logprob.py new file mode 100644 index 00000000..6a007805 --- /dev/null +++ b/launch/api_client/test/test_models/test_top_logprob.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + 
+ No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.top_logprob import TopLogprob + + +class TestTopLogprob(unittest.TestCase): + """TopLogprob unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_triton_enhanced_runnable_image_flavor.py b/launch/api_client/test/test_models/test_triton_enhanced_runnable_image_flavor.py new file mode 100644 index 00000000..dacfe32e --- /dev/null +++ b/launch/api_client/test/test_models/test_triton_enhanced_runnable_image_flavor.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.triton_enhanced_runnable_image_flavor import ( + TritonEnhancedRunnableImageFlavor, +) + + +class TestTritonEnhancedRunnableImageFlavor(unittest.TestCase): + """TritonEnhancedRunnableImageFlavor unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_completions_v2_request.py b/launch/api_client/test/test_models/test_update_batch_completions_v2_request.py new file mode 100644 index 00000000..e2a443f3 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_batch_completions_v2_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_batch_completions_v2_request import ( + UpdateBatchCompletionsV2Request, +) + + +class TestUpdateBatchCompletionsV2Request(unittest.TestCase): + """UpdateBatchCompletionsV2Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_completions_v2_response.py b/launch/api_client/test/test_models/test_update_batch_completions_v2_response.py new file mode 100644 index 00000000..65f21948 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_batch_completions_v2_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_batch_completions_v2_response import ( + UpdateBatchCompletionsV2Response, +) + + +class TestUpdateBatchCompletionsV2Response(unittest.TestCase): + """UpdateBatchCompletionsV2Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_job_v1_request.py b/launch/api_client/test/test_models/test_update_batch_job_v1_request.py new file mode 100644 index 00000000..9bcdc368 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_batch_job_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description 
provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_batch_job_v1_request import ( + UpdateBatchJobV1Request, +) + + +class TestUpdateBatchJobV1Request(unittest.TestCase): + """UpdateBatchJobV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_batch_job_v1_response.py b/launch/api_client/test/test_models/test_update_batch_job_v1_response.py new file mode 100644 index 00000000..c445d59a --- /dev/null +++ b/launch/api_client/test/test_models/test_update_batch_job_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_batch_job_v1_response import ( + UpdateBatchJobV1Response, +) + + +class TestUpdateBatchJobV1Response(unittest.TestCase): + """UpdateBatchJobV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_deep_speed_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_deep_speed_model_endpoint_request.py new file mode 100644 index 00000000..ba1ac2f9 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_deep_speed_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by 
Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_deep_speed_model_endpoint_request import ( + UpdateDeepSpeedModelEndpointRequest, +) + + +class TestUpdateDeepSpeedModelEndpointRequest(unittest.TestCase): + """UpdateDeepSpeedModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_request.py b/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_request.py new file mode 100644 index 00000000..a42c685f --- /dev/null +++ b/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_docker_image_batch_job_v1_request import ( + UpdateDockerImageBatchJobV1Request, +) + + +class TestUpdateDockerImageBatchJobV1Request(unittest.TestCase): + """UpdateDockerImageBatchJobV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_response.py b/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_response.py new file mode 100644 index 00000000..93042b86 --- /dev/null +++ 
b/launch/api_client/test/test_models/test_update_docker_image_batch_job_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_docker_image_batch_job_v1_response import ( + UpdateDockerImageBatchJobV1Response, +) + + +class TestUpdateDockerImageBatchJobV1Response(unittest.TestCase): + """UpdateDockerImageBatchJobV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_request.py new file mode 100644 index 00000000..d26a95ab --- /dev/null +++ b/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_llm_model_endpoint_v1_request import ( + UpdateLLMModelEndpointV1Request, +) + + +class TestUpdateLLMModelEndpointV1Request(unittest.TestCase): + """UpdateLLMModelEndpointV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_response.py 
b/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_response.py new file mode 100644 index 00000000..fcb9a57d --- /dev/null +++ b/launch/api_client/test/test_models/test_update_llm_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_llm_model_endpoint_v1_response import ( + UpdateLLMModelEndpointV1Response, +) + + +class TestUpdateLLMModelEndpointV1Response(unittest.TestCase): + """UpdateLLMModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_model_endpoint_v1_request.py b/launch/api_client/test/test_models/test_update_model_endpoint_v1_request.py new file mode 100644 index 00000000..24ce845f --- /dev/null +++ b/launch/api_client/test/test_models/test_update_model_endpoint_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_model_endpoint_v1_request import ( + UpdateModelEndpointV1Request, +) + + +class TestUpdateModelEndpointV1Request(unittest.TestCase): + """UpdateModelEndpointV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git 
a/launch/api_client/test/test_models/test_update_model_endpoint_v1_response.py b/launch/api_client/test/test_models/test_update_model_endpoint_v1_response.py new file mode 100644 index 00000000..36a54c43 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_model_endpoint_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_model_endpoint_v1_response import ( + UpdateModelEndpointV1Response, +) + + +class TestUpdateModelEndpointV1Response(unittest.TestCase): + """UpdateModelEndpointV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_sg_lang_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_sg_lang_model_endpoint_request.py new file mode 100644 index 00000000..8e0e604c --- /dev/null +++ b/launch/api_client/test/test_models/test_update_sg_lang_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_sg_lang_model_endpoint_request import ( + UpdateSGLangModelEndpointRequest, +) + + +class TestUpdateSGLangModelEndpointRequest(unittest.TestCase): + """UpdateSGLangModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == 
"__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_text_generation_inference_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_text_generation_inference_model_endpoint_request.py new file mode 100644 index 00000000..7a87841b --- /dev/null +++ b/launch/api_client/test/test_models/test_update_text_generation_inference_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_text_generation_inference_model_endpoint_request import ( + UpdateTextGenerationInferenceModelEndpointRequest, +) + + +class TestUpdateTextGenerationInferenceModelEndpointRequest(unittest.TestCase): + """UpdateTextGenerationInferenceModelEndpointRequest unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_trigger_v1_request.py b/launch/api_client/test/test_models/test_update_trigger_v1_request.py new file mode 100644 index 00000000..e0b8e004 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_trigger_v1_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_trigger_v1_request import ( + UpdateTriggerV1Request, +) + + +class TestUpdateTriggerV1Request(unittest.TestCase): + 
"""UpdateTriggerV1Request unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_trigger_v1_response.py b/launch/api_client/test/test_models/test_update_trigger_v1_response.py new file mode 100644 index 00000000..253d37d2 --- /dev/null +++ b/launch/api_client/test/test_models/test_update_trigger_v1_response.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_trigger_v1_response import ( + UpdateTriggerV1Response, +) + + +class TestUpdateTriggerV1Response(unittest.TestCase): + """UpdateTriggerV1Response unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_update_vllm_model_endpoint_request.py b/launch/api_client/test/test_models/test_update_vllm_model_endpoint_request.py new file mode 100644 index 00000000..9b488dcd --- /dev/null +++ b/launch/api_client/test/test_models/test_update_vllm_model_endpoint_request.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.update_vllm_model_endpoint_request import ( + UpdateVLLMModelEndpointRequest, +) + + +class TestUpdateVLLMModelEndpointRequest(unittest.TestCase): + """UpdateVLLMModelEndpointRequest 
unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_upload_file_response.py b/launch/api_client/test/test_models/test_upload_file_response.py new file mode 100644 index 00000000..e734761a --- /dev/null +++ b/launch/api_client/test/test_models/test_upload_file_response.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.upload_file_response import UploadFileResponse + + +class TestUploadFileResponse(unittest.TestCase): + """UploadFileResponse unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_url_citation.py b/launch/api_client/test/test_models/test_url_citation.py new file mode 100644 index 00000000..4e42efe6 --- /dev/null +++ b/launch/api_client/test/test_models/test_url_citation.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.url_citation import UrlCitation + + +class TestUrlCitation(unittest.TestCase): + """UrlCitation unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_user_location.py 
b/launch/api_client/test/test_models/test_user_location.py new file mode 100644 index 00000000..5c6048ea --- /dev/null +++ b/launch/api_client/test/test_models/test_user_location.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.user_location import UserLocation + + +class TestUserLocation(unittest.TestCase): + """UserLocation unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_validation_error.py b/launch/api_client/test/test_models/test_validation_error.py new file mode 100644 index 00000000..3147cb65 --- /dev/null +++ b/launch/api_client/test/test_models/test_validation_error.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.validation_error import ValidationError + + +class TestValidationError(unittest.TestCase): + """ValidationError unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_voice_ids_shared.py b/launch/api_client/test/test_models/test_voice_ids_shared.py new file mode 100644 index 00000000..dfe31382 --- /dev/null +++ b/launch/api_client/test/test_models/test_voice_ids_shared.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + 
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.voice_ids_shared import VoiceIdsShared + + +class TestVoiceIdsShared(unittest.TestCase): + """VoiceIdsShared unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_web_search_context_size.py b/launch/api_client/test/test_models/test_web_search_context_size.py new file mode 100644 index 00000000..457052f1 --- /dev/null +++ b/launch/api_client/test/test_models/test_web_search_context_size.py @@ -0,0 +1,28 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.web_search_context_size import ( + WebSearchContextSize, +) + + +class TestWebSearchContextSize(unittest.TestCase): + """WebSearchContextSize unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_web_search_location.py b/launch/api_client/test/test_models/test_web_search_location.py new file mode 100644 index 00000000..7cee87fd --- /dev/null +++ b/launch/api_client/test/test_models/test_web_search_location.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 
+ Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.web_search_location import WebSearchLocation + + +class TestWebSearchLocation(unittest.TestCase): + """WebSearchLocation unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_web_search_options.py b/launch/api_client/test/test_models/test_web_search_options.py new file mode 100644 index 00000000..ad6020c3 --- /dev/null +++ b/launch/api_client/test/test_models/test_web_search_options.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.web_search_options import WebSearchOptions + + +class TestWebSearchOptions(unittest.TestCase): + """WebSearchOptions unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/api_client/test/test_models/test_zip_artifact_flavor.py b/launch/api_client/test/test_models/test_zip_artifact_flavor.py new file mode 100644 index 00000000..d301f34f --- /dev/null +++ b/launch/api_client/test/test_models/test_zip_artifact_flavor.py @@ -0,0 +1,26 @@ +# coding: utf-8 + +""" + launch + + No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + +import unittest + +import launch.api_client +from launch.api_client import configuration +from launch.api_client.model.zip_artifact_flavor 
import ZipArtifactFlavor + + +class TestZipArtifactFlavor(unittest.TestCase): + """ZipArtifactFlavor unit test stubs""" + + _configuration = configuration.Configuration() + + +if __name__ == "__main__": + unittest.main() diff --git a/launch/client.py b/launch/client.py index 89a686a9..df6f3d6f 100644 --- a/launch/client.py +++ b/launch/client.py @@ -29,6 +29,9 @@ from launch.api_client import ApiClient, Configuration from launch.api_client.apis.tags.default_api import DefaultApi +from launch.api_client.model.body_upload_file_v1_files_post import ( + BodyUploadFileV1FilesPost, +) from launch.api_client.model.callback_auth import CallbackAuth from launch.api_client.model.clone_model_bundle_v1_request import ( CloneModelBundleV1Request, @@ -3182,12 +3185,12 @@ def upload_file( UploadFileResponse: ID of the created file """ with open(file_path, "rb") as file: - files = {"file": file} + body = BodyUploadFileV1FilesPost(file=file) with ApiClient(self.configuration) as api_client: api_instance = DefaultApi(api_client) response = api_instance.upload_file_v1_files_post( - body=files, + body=body, skip_deserialization=True, ) resp = UploadFileResponse.parse_raw(response.response.data) diff --git a/openapi.json b/openapi.json new file mode 100644 index 00000000..24818db9 --- /dev/null +++ b/openapi.json @@ -0,0 +1,14645 @@ +{ + "openapi": "3.0.3", + "info": { + "title": "launch", + "version": "1.0.0" + }, + "paths": { + "/v1/batch-jobs": { + "post": { + "summary": "Create Batch Job", + "description": "Runs a batch job.", + "operationId": "create_batch_job_v1_batch_jobs_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateBatchJobV1Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateBatchJobV1Response" + } + } + } + }, + "422": { + "description": "Validation 
Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/batch-jobs/{batch_job_id}": { + "get": { + "summary": "Get Batch Job", + "description": "Gets a batch job.", + "operationId": "get_batch_job_v1_batch_jobs__batch_job_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetBatchJobV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "put": { + "summary": "Update Batch Job", + "description": "Updates a batch job.", + "operationId": "update_batch_job_v1_batch_jobs__batch_job_id__put", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Job Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateBatchJobV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateBatchJobV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/docker-image-batch-jobs": 
{ + "post": { + "summary": "Create Docker Image Batch Job", + "operationId": "create_docker_image_batch_job_v1_docker_image_batch_jobs_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateDockerImageBatchJobV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateDockerImageBatchJobV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "summary": "List Docker Image Batch Jobs", + "description": "Lists docker image batch jobs spawned by trigger with given ID", + "operationId": "list_docker_image_batch_jobs_v1_docker_image_batch_jobs_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "trigger_id", + "in": "query", + "required": false, + "schema": { + "title": "Trigger Id", + "type": "string", + "nullable": true + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDockerImageBatchJobsV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/docker-image-batch-jobs/{batch_job_id}": { + "get": { + "summary": "Get Docker Image Batch Job", + "operationId": "get_docker_image_batch_job_v1_docker_image_batch_jobs__batch_job_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_job_id", + "in": 
"path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Job Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetDockerImageBatchJobV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "put": { + "summary": "Update Docker Image Batch Job", + "operationId": "update_docker_image_batch_job_v1_docker_image_batch_jobs__batch_job_id__put", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_job_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Job Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateDockerImageBatchJobV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateDockerImageBatchJobV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/async-tasks": { + "post": { + "summary": "Create Async Inference Task", + "description": "Runs an async inference prediction.", + "operationId": "create_async_inference_task_v1_async_tasks_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + 
"$ref": "#/components/schemas/EndpointPredictV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateAsyncTaskV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/async-tasks/{task_id}": { + "get": { + "summary": "Get Async Inference Task", + "description": "Gets the status of an async inference task.", + "operationId": "get_async_inference_task_v1_async_tasks__task_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "task_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Task Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetAsyncTaskV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/sync-tasks": { + "post": { + "summary": "Create Sync Inference Task", + "description": "Runs a sync inference prediction.", + "operationId": "create_sync_inference_task_v1_sync_tasks_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncEndpointPredictV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + 
"application/json": { + "schema": { + "$ref": "#/components/schemas/SyncEndpointPredictV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/streaming-tasks": { + "post": { + "summary": "Create Streaming Inference Task", + "description": "Runs a streaming inference prediction.", + "operationId": "create_streaming_inference_task_v1_streaming_tasks_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SyncEndpointPredictV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-bundles": { + "post": { + "summary": "Create Model Bundle", + "description": "Creates a ModelBundle for the current user.", + "operationId": "create_model_bundle_v1_model_bundles_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelBundleV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": 
{ + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "summary": "List Model Bundles", + "description": "Lists the ModelBundles owned by the current owner.", + "operationId": "list_model_bundles_v1_model_bundles_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_name", + "in": "query", + "required": false, + "schema": { + "title": "Model Name", + "type": "string", + "nullable": true + } + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "title": "Order By", + "$ref": "#/components/schemas/ModelBundleOrderBy", + "nullable": true + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelBundlesV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-bundles/clone-with-changes": { + "post": { + "summary": "Clone Model Bundle With Changes", + "description": "Creates a ModelBundle by cloning an existing one and then applying changes on top.", + "operationId": "clone_model_bundle_with_changes_v1_model_bundles_clone_with_changes_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CloneModelBundleV1Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + 
"OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/model-bundles/latest": { + "get": { + "summary": "Get Latest Model Bundle", + "description": "Gets the latest Model Bundle with the given name owned by the current owner.", + "operationId": "get_latest_model_bundle_v1_model_bundles_latest_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-bundles/{model_bundle_id}": { + "get": { + "summary": "Get Model Bundle", + "description": "Gets the details for a given ModelBundle owned by the current owner.", + "operationId": "get_model_bundle_v1_model_bundles__model_bundle_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_bundle_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Bundle Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/model-bundles": { + "post": { + "summary": "Create Model Bundle", + "description": "Creates a ModelBundle for the current user.", + "operationId": "create_model_bundle_v2_model_bundles_post", + "security": 
[ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelBundleV2Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelBundleV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "summary": "List Model Bundles", + "description": "Lists the ModelBundles owned by the current owner.", + "operationId": "list_model_bundles_v2_model_bundles_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_name", + "in": "query", + "required": false, + "schema": { + "title": "Model Name", + "type": "string", + "nullable": true + } + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "title": "Order By", + "$ref": "#/components/schemas/ModelBundleOrderBy", + "nullable": true + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelBundlesV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/model-bundles/clone-with-changes": { + "post": { + "summary": "Clone Model Bundle With Changes", + "description": "Creates a ModelBundle by cloning an existing one and then applying changes on top.", + "operationId": "clone_model_bundle_with_changes_v2_model_bundles_clone_with_changes_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/CloneModelBundleV2Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelBundleV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v2/model-bundles/latest": { + "get": { + "summary": "Get Latest Model Bundle", + "description": "Gets the latest Model Bundle with the given name owned by the current owner.", + "operationId": "get_latest_model_bundle_v2_model_bundles_latest_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelBundleV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/model-bundles/{model_bundle_id}": { + "get": { + "summary": "Get Model Bundle", + "description": "Gets the details for a given ModelBundle owned by the current owner.", + "operationId": "get_model_bundle_v2_model_bundles__model_bundle_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_bundle_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Bundle Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelBundleV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-endpoints": { + "post": { + "summary": "Create Model Endpoint", + "description": "Creates a Model for the current user.", + "operationId": "create_model_endpoint_v1_model_endpoints_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelEndpointV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "summary": "List Model Endpoints", + "description": "Lists the Models owned by the current owner.", + "operationId": "list_model_endpoints_v1_model_endpoints_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "name", + "in": "query", + "required": false, + "schema": { + "title": "Name", + "type": "string", + "nullable": true + } + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "title": "Order By", + "$ref": "#/components/schemas/ModelEndpointOrderBy", + "nullable": true + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListModelEndpointsV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", 
+ "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-endpoints/{model_endpoint_id}": { + "get": { + "summary": "Get Model Endpoint", + "description": "Describe the Model endpoint with given ID.", + "operationId": "get_model_endpoint_v1_model_endpoints__model_endpoint_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "put": { + "summary": "Update Model Endpoint", + "description": "Updates the Model endpoint.", + "operationId": "update_model_endpoint_v1_model_endpoints__model_endpoint_id__put", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateModelEndpointV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": 
{ + "summary": "Delete Model Endpoint", + "description": "Lists the Models owned by the current owner.", + "operationId": "delete_model_endpoint_v1_model_endpoints__model_endpoint_id__delete", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-endpoints/{model_endpoint_id}/restart": { + "post": { + "summary": "Restart Model Endpoint", + "description": "Restarts the Model endpoint.", + "operationId": "restart_model_endpoint_v1_model_endpoints__model_endpoint_id__restart_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RestartModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/model-endpoints-schema.json": { + "get": { + "summary": "Get Model Endpoints Schema", + "description": "Lists the schemas of the Model Endpoints owned by the current owner.", + "operationId": "get_model_endpoints_schema_v1_model_endpoints_schema_json_get", + "responses": { + 
"200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/model-endpoints-api": { + "get": { + "summary": "Get Model Endpoints Api", + "description": "Shows the API of the Model Endpoints owned by the current owner.", + "operationId": "get_model_endpoints_api_v1_model_endpoints_api_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/docker-image-batch-job-bundles": { + "post": { + "summary": "Create Docker Image Batch Job Bundle", + "description": "Creates a docker iamge batch job bundle", + "operationId": "create_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateDockerImageBatchJobBundleV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateDockerImageBatchJobBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "summary": "List Docker Image Batch Job Model Bundles", + "description": "Lists docker image batch job bundles owned by current owner", + "operationId": "list_docker_image_batch_job_model_bundles_v1_docker_image_batch_job_bundles_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "bundle_name", + "in": 
"query", + "required": false, + "schema": { + "title": "Bundle Name", + "type": "string", + "nullable": true + } + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "title": "Order By", + "$ref": "#/components/schemas/ModelBundleOrderBy", + "nullable": true + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListDockerImageBatchJobBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/docker-image-batch-job-bundles/latest": { + "get": { + "summary": "Get Latest Docker Image Batch Job Bundle", + "description": "Gets latest Docker Image Batch Job Bundle with given name owned by the current owner", + "operationId": "get_latest_docker_image_batch_job_bundle_v1_docker_image_batch_job_bundles_latest_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "bundle_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Bundle Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DockerImageBatchJobBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/docker-image-batch-job-bundles/{docker_image_batch_job_bundle_id}": { + "get": { + "summary": "Get Docker Image Batch Job Model Bundle", + "description": "Get details for a given DockerImageBatchJobBundle owned by the current owner", + "operationId": 
"get_docker_image_batch_job_model_bundle_v1_docker_image_batch_job_bundles__docker_image_batch_job_bundle_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "docker_image_batch_job_bundle_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Docker Image Batch Job Bundle Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DockerImageBatchJobBundleV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/model-endpoints": { + "post": { + "summary": "Create Model Endpoint", + "operationId": "create_model_endpoint_v1_llm_model_endpoints_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateLLMModelEndpointV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateLLMModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "get": { + "summary": "List Model Endpoints", + "description": "Lists the LLM model endpoints owned by the current owner, plus all public_inference LLMs.", + "operationId": "list_model_endpoints_v1_llm_model_endpoints_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "name", + "in": "query", + "required": false, + "schema": { + "title": 
"Name", + "type": "string", + "nullable": true + } + }, + { + "name": "order_by", + "in": "query", + "required": false, + "schema": { + "title": "Order By", + "$ref": "#/components/schemas/ModelEndpointOrderBy", + "nullable": true + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListLLMModelEndpointsV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/model-endpoints/{model_endpoint_name}": { + "get": { + "summary": "Get Model Endpoint", + "description": "Describe the LLM Model endpoint with given name.", + "operationId": "get_model_endpoint_v1_llm_model_endpoints__model_endpoint_name__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetLLMModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "put": { + "summary": "Update Model Endpoint", + "description": "Updates an LLM endpoint for the current user.", + "operationId": "update_model_endpoint_v1_llm_model_endpoints__model_endpoint_name__put", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Name" + } + } + ], + 
"requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateLLMModelEndpointV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateLLMModelEndpointV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "summary": "Delete Llm Model Endpoint", + "operationId": "delete_llm_model_endpoint_v1_llm_model_endpoints__model_endpoint_name__delete", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_name", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Name" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteLLMEndpointResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/completions-sync": { + "post": { + "summary": "Create Completion Sync Task", + "description": "Runs a sync prompt completion on an LLM.", + "operationId": "create_completion_sync_task_v1_llm_completions_sync_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Name" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CompletionSyncV1Request" + } + } + } + }, + 
"responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CompletionSyncV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/completions-stream": { + "post": { + "summary": "Create Completion Stream Task", + "description": "Runs a stream prompt completion on an LLM.", + "operationId": "create_completion_stream_task_v1_llm_completions_stream_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "model_endpoint_name", + "in": "query", + "required": true, + "schema": { + "type": "string", + "title": "Model Endpoint Name" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CompletionStreamV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CompletionStreamV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/fine-tunes": { + "get": { + "summary": "List Fine Tunes", + "operationId": "list_fine_tunes_v1_llm_fine_tunes_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListFineTunesResponse" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + }, + "post": { + "summary": "Create Fine Tune", + "operationId": "create_fine_tune_v1_llm_fine_tunes_post", + "requestBody": { + "content": { + "application/json": { + 
"schema": { + "$ref": "#/components/schemas/CreateFineTuneRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateFineTuneResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/llm/fine-tunes/{fine_tune_id}": { + "get": { + "summary": "Get Fine Tune", + "operationId": "get_fine_tune_v1_llm_fine_tunes__fine_tune_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "fine_tune_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Fine Tune Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetFineTuneResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/fine-tunes/{fine_tune_id}/cancel": { + "put": { + "summary": "Cancel Fine Tune", + "operationId": "cancel_fine_tune_v1_llm_fine_tunes__fine_tune_id__cancel_put", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "fine_tune_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Fine Tune Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CancelFineTuneResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + 
"content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/fine-tunes/{fine_tune_id}/events": { + "get": { + "summary": "Get Fine Tune Events", + "operationId": "get_fine_tune_events_v1_llm_fine_tunes__fine_tune_id__events_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "fine_tune_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Fine Tune Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetFineTuneEventsResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/llm/model-endpoints/download": { + "post": { + "summary": "Download Model Endpoint", + "operationId": "download_model_endpoint_v1_llm_model_endpoints_download_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelDownloadRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ModelDownloadResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/llm/batch-completions": { + "post": { + "summary": "Create Batch Completions", + "operationId": "create_batch_completions_v1_llm_batch_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/CreateBatchCompletionsV1Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateBatchCompletionsV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/files": { + "get": { + "summary": "List Files", + "operationId": "list_files_v1_files_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListFilesResponse" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + }, + "post": { + "summary": "Upload File", + "operationId": "upload_file_v1_files_post", + "requestBody": { + "content": { + "multipart/form-data": { + "schema": { + "$ref": "#/components/schemas/Body_upload_file_v1_files_post" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UploadFileResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/files/{file_id}": { + "get": { + "summary": "Get File", + "operationId": "get_file_v1_files__file_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" 
+ } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetFileResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "summary": "Delete File", + "operationId": "delete_file_v1_files__file_id__delete", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteFileResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/files/{file_id}/content": { + "get": { + "summary": "Get File Content", + "description": "Describe the LLM Model endpoint with given name.", + "operationId": "get_file_content_v1_files__file_id__content_get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "file_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "File Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetFileContentResponse" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v1/triggers": { + "get": { + "summary": "List Triggers", + "description": "Lists 
descriptions of all triggers", + "operationId": "list_triggers_v1_triggers_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ListTriggersV1Response" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + }, + "post": { + "summary": "Create Trigger", + "description": "Creates and runs a trigger", + "operationId": "create_trigger_v1_triggers_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateTriggerV1Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateTriggerV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v1/triggers/{trigger_id}": { + "get": { + "summary": "Get Trigger", + "description": "Describes the trigger with the given ID", + "operationId": "get_trigger_v1_triggers__trigger_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "trigger_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Trigger Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetTriggerV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "put": { + "summary": "Update Trigger", + "description": 
"Updates the trigger with the given ID", + "operationId": "update_trigger_v1_triggers__trigger_id__put", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "trigger_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Trigger Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateTriggerV1Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateTriggerV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "delete": { + "summary": "Delete Trigger", + "description": "Deletes the trigger with the given ID", + "operationId": "delete_trigger_v1_triggers__trigger_id__delete", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "trigger_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Trigger Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteTriggerV1Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/batch-completions": { + "post": { + "summary": "Batch Completions", + "operationId": "batch_completions_v2_batch_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateBatchCompletionsV2Request" + } + } + }, + "required": true + }, + "responses": { + 
"200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BatchCompletionsJob" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v2/batch-completions/{batch_completion_id}": { + "get": { + "summary": "Get Batch Completion", + "operationId": "get_batch_completion_v2_batch_completions__batch_completion_id__get", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_completion_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Completion Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/GetBatchCompletionV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + }, + "post": { + "summary": "Update Batch Completion", + "operationId": "update_batch_completion_v2_batch_completions__batch_completion_id__post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_completion_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Completion Id" + } + } + ], + "requestBody": { + "required": true, + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/UpdateBatchCompletionsV2Request" + } + } + } + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/UpdateBatchCompletionsV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/batch-completions/{batch_completion_id}/actions/cancel": { + "post": { + "summary": "Cancel Batch Completion", + "operationId": "cancel_batch_completion_v2_batch_completions__batch_completion_id__actions_cancel_post", + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ], + "parameters": [ + { + "name": "batch_completion_id", + "in": "path", + "required": true, + "schema": { + "type": "string", + "title": "Batch Completion Id" + } + } + ], + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CancelBatchCompletionsV2Response" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + } + } + }, + "/v2/chat/completions": { + "post": { + "summary": "Chat Completion", + "operationId": "chat_completion_v2_chat_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/ChatCompletionV2Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/CreateChatCompletionResponse" + }, + { + "$ref": "#/components/schemas/CreateChatCompletionStreamResponse" + }, + { + "$ref": "#/components/schemas/ChatCompletionV2StreamErrorChunk" + } + ], + "title": "Response Chat Completion V2 Chat Completions Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/v2/completions": { + "post": { + "summary": "Completion", + "operationId": "completion_v2_completions_post", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CompletionV2Request" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": { + "anyOf": [ + { + "$ref": "#/components/schemas/CreateCompletionResponse" + }, + { + "$ref": "#/components/schemas/CompletionV2StreamErrorChunk" + } + ], + "title": "Response Completion V2 Completions Post" + } + } + } + }, + "422": { + "description": "Validation Error", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPValidationError" + } + } + } + } + }, + "security": [ + { + "HTTPBasic": [] + }, + { + "OAuth2PasswordBearer": [] + } + ] + } + }, + "/healthcheck": { + "get": { + "summary": "Healthcheck", + "description": "Returns 200 if the app is healthy.", + "operationId": "healthcheck_healthcheck_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + } + } + }, + "/healthz": { + "get": { + "summary": "Healthcheck", + "description": "Returns 200 if the app is healthy.", + "operationId": "healthcheck_healthz_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + } + } + }, + "/readyz": { + "get": { + "summary": "Healthcheck", + "description": "Returns 200 if the app is healthy.", + "operationId": "healthcheck_readyz_get", + "responses": { + "200": { + "description": "Successful Response", + "content": { + "application/json": { + "schema": {} + } + } + } + } + } + } + }, + "components": { + "schemas": { + 
"Annotation": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the URL citation. Always `url_citation`.", + "enum": [ + "url_citation" + ] + }, + "url_citation": { + "$ref": "#/components/schemas/UrlCitation", + "description": "A URL citation when using web search." + } + }, + "type": "object", + "required": [ + "type", + "url_citation" + ], + "title": "Annotation" + }, + "Audio": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "Unique identifier for a previous audio response from the model.\n" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "Audio" + }, + "Audio1": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "Unique identifier for this audio response." + }, + "expires_at": { + "type": "integer", + "title": "Expires At", + "description": "The Unix timestamp (in seconds) for when this audio response will\nno longer be accessible on the server for use in multi-turn\nconversations.\n" + }, + "data": { + "type": "string", + "title": "Data", + "description": "Base64 encoded audio bytes generated by the model, in the format\nspecified in the request.\n" + }, + "transcript": { + "type": "string", + "title": "Transcript", + "description": "Transcript of the audio generated by the model." + } + }, + "type": "object", + "required": [ + "id", + "expires_at", + "data", + "transcript" + ], + "title": "Audio1" + }, + "Audio2": { + "properties": { + "voice": { + "$ref": "#/components/schemas/VoiceIdsShared", + "description": "The voice the model uses to respond. Supported voices are \n`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.\n" + }, + "format": { + "type": "string", + "enum": [ + "wav", + "aac", + "mp3", + "flac", + "opus", + "pcm16" + ], + "title": "Format", + "description": "Specifies the output audio format. 
Must be one of `wav`, `mp3`, `flac`,\n`opus`, or `pcm16`.\n" + } + }, + "type": "object", + "required": [ + "voice", + "format" + ], + "title": "Audio2" + }, + "BatchCompletionsJob": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + }, + "input_data_path": { + "title": "Input Data Path", + "description": "Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", + "type": "string", + "nullable": true + }, + "output_data_path": { + "type": "string", + "title": "Output Data Path", + "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." + }, + "model_config": { + "$ref": "#/components/schemas/BatchCompletionsModelConfig", + "description": "Model configuration for the batch inference. Hardware configurations are inferred." + }, + "priority": { + "title": "Priority", + "description": "Priority of the batch inference job. Default to None.", + "type": "string", + "nullable": true + }, + "status": { + "$ref": "#/components/schemas/BatchCompletionsJobStatus" + }, + "created_at": { + "type": "string", + "title": "Created At" + }, + "expires_at": { + "type": "string", + "title": "Expires At" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + } + }, + "type": "object", + "required": [ + "job_id", + "output_data_path", + "model_config", + "status", + "created_at", + "expires_at", + "completed_at", + "metadata" + ], + "title": "BatchCompletionsJob" + }, + "BatchCompletionsJobStatus": { + "type": "string", + "enum": [ + "queued", + "running", + "completed", + "failed", + "cancelled", + "unknown" + ], + "title": "BatchCompletionsJobStatus" + }, + "BatchCompletionsModelConfig": { + "properties": { + "max_model_len": { + "title": "Max Model Len", + "description": "Model 
context length, If unspecified, will be automatically derived from the model config", + "type": "integer", + "nullable": true + }, + "max_num_seqs": { + "title": "Max Num Seqs", + "description": "Maximum number of sequences per iteration", + "type": "integer", + "nullable": true + }, + "enforce_eager": { + "title": "Enforce Eager", + "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal perforamnce and flexibility", + "type": "boolean", + "nullable": true + }, + "trust_remote_code": { + "title": "Trust Remote Code", + "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", + "default": false, + "type": "boolean", + "nullable": true + }, + "pipeline_parallel_size": { + "title": "Pipeline Parallel Size", + "description": "Number of pipeline stages. Default to None.", + "type": "integer", + "nullable": true + }, + "tensor_parallel_size": { + "title": "Tensor Parallel Size", + "description": "Number of tensor parallel replicas. Default to None.", + "type": "integer", + "nullable": true + }, + "quantization": { + "title": "Quantization", + "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", + "type": "string", + "nullable": true + }, + "disable_log_requests": { + "title": "Disable Log Requests", + "description": "Disable logging requests. Default to None.", + "type": "boolean", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "tool_call_parser": { + "title": "Tool Call Parser", + "description": "Tool call parser", + "type": "string", + "nullable": true + }, + "enable_auto_tool_choice": { + "title": "Enable Auto Tool Choice", + "description": "Enable auto tool choice", + "type": "boolean", + "nullable": true + }, + "load_format": { + "title": "Load Format", + "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", + "type": "string", + "nullable": true + }, + "config_format": { + "title": "Config Format", + "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", + "type": "string", + "nullable": true + }, + "tokenizer_mode": { + "title": "Tokenizer Mode", + "description": "Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`.", + "type": "string", + "nullable": true + }, + "limit_mm_per_prompt": { + "title": "Limit Mm Per Prompt", + "description": "Maximum number of data instances per modality per prompt. 
Only applicable for multimodal models.", + "type": "string", + "nullable": true + }, + "max_num_batched_tokens": { + "title": "Max Num Batched Tokens", + "description": "Maximum number of batched tokens per iteration", + "type": "integer", + "nullable": true + }, + "tokenizer": { + "title": "Tokenizer", + "description": "Name or path of the huggingface tokenizer to use.", + "type": "string", + "nullable": true + }, + "dtype": { + "title": "Dtype", + "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", + "type": "string", + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "Random seed for the model.", + "type": "integer", + "nullable": true + }, + "revision": { + "title": "Revision", + "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "code_revision": { + "title": "Code Revision", + "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "rope_scaling": { + "title": "Rope Scaling", + "description": "Dictionary containing the scaling configuration for the RoPE embeddings. When using this flag, don't update `max_position_embeddings` to the expected new maximum.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "tokenizer_revision": { + "title": "Tokenizer Revision", + "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. 
If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "quantization_param_path": { + "title": "Quantization Param Path", + "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", + "type": "string", + "nullable": true + }, + "max_seq_len_to_capture": { + "title": "Max Seq Len To Capture", + "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", + "type": "integer", + "nullable": true + }, + "disable_sliding_window": { + "title": "Disable Sliding Window", + "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", + "type": "boolean", + "nullable": true + }, + "skip_tokenizer_init": { + "title": "Skip Tokenizer Init", + "description": "If true, skip initialization of tokenizer and detokenizer.", + "type": "boolean", + "nullable": true + }, + "served_model_name": { + "title": "Served Model Name", + "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", + "type": "string", + "nullable": true + }, + "override_neuron_config": { + "title": "Override Neuron Config", + "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "mm_processor_kwargs": { + "title": "Mm Processor Kwargs", + "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "block_size": { + "title": "Block Size", + "description": "Size of a cache block in number of tokens.", + "type": "integer", + "nullable": true + }, + "gpu_memory_utilization": { + "title": "Gpu Memory Utilization", + "description": "Fraction of GPU memory to use for the vLLM execution.", + "type": "number", + "nullable": true + }, + "swap_space": { + "title": "Swap Space", + "description": "Size of the CPU swap space per GPU (in GiB).", + "type": "number", + "nullable": true + }, + "cache_dtype": { + "title": "Cache Dtype", + "description": "Data type for kv cache storage.", + "type": "string", + "nullable": true + }, + "num_gpu_blocks_override": { + "title": "Num Gpu Blocks Override", + "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. 
Does nothing if None.", + "type": "integer", + "nullable": true + }, + "enable_prefix_caching": { + "title": "Enable Prefix Caching", + "description": "Enables automatic prefix caching.", + "type": "boolean", + "nullable": true + }, + "model": { + "type": "string", + "title": "Model", + "description": "ID of the model to use.", + "example": "mixtral-8x7b-instruct" + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "description": "Path to the checkpoint to load the model from.", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "description": "\nSuggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config.\nSystem may decide to use a different number than the given value.\n", + "default": 1, + "type": "integer", + "minimum": 1.0, + "nullable": true + }, + "max_context_length": { + "title": "Max Context Length", + "description": "Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len.", + "type": "integer", + "minimum": 1.0, + "nullable": true + }, + "response_role": { + "title": "Response Role", + "description": "Role of the response in the conversation. 
Only supported in chat completions.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "model" + ], + "title": "BatchCompletionsModelConfig" + }, + "BatchJobSerializationFormat": { + "type": "string", + "enum": [ + "JSON", + "PICKLE" + ], + "title": "BatchJobSerializationFormat" + }, + "BatchJobStatus": { + "type": "string", + "enum": [ + "PENDING", + "RUNNING", + "SUCCESS", + "FAILURE", + "CANCELLED", + "UNDEFINED", + "TIMEOUT" + ], + "title": "BatchJobStatus" + }, + "Body_upload_file_v1_files_post": { + "properties": { + "file": { + "type": "string", + "format": "binary", + "title": "File" + } + }, + "type": "object", + "required": [ + "file" + ], + "title": "Body_upload_file_v1_files_post" + }, + "CallbackAuth": { + "oneOf": [ + { + "$ref": "#/components/schemas/CallbackBasicAuth" + }, + { + "$ref": "#/components/schemas/CallbackmTLSAuth" + } + ], + "title": "CallbackAuth", + "discriminator": { + "propertyName": "kind", + "mapping": { + "basic": "#/components/schemas/CallbackBasicAuth", + "mtls": "#/components/schemas/CallbackmTLSAuth" + } + } + }, + "CallbackBasicAuth": { + "properties": { + "kind": { + "type": "string", + "title": "Kind", + "enum": [ + "basic" + ] + }, + "username": { + "type": "string", + "title": "Username" + }, + "password": { + "type": "string", + "title": "Password" + } + }, + "type": "object", + "required": [ + "kind", + "username", + "password" + ], + "title": "CallbackBasicAuth" + }, + "CallbackmTLSAuth": { + "properties": { + "kind": { + "type": "string", + "title": "Kind", + "enum": [ + "mtls" + ] + }, + "cert": { + "type": "string", + "title": "Cert" + }, + "key": { + "type": "string", + "title": "Key" + } + }, + "type": "object", + "required": [ + "kind", + "cert", + "key" + ], + "title": "CallbackmTLSAuth" + }, + "CancelBatchCompletionsV2Response": { + "properties": { + "success": { + "type": "boolean", + "title": "Success", + "description": "Whether the cancellation was successful" + } + }, + 
"type": "object", + "required": [ + "success" + ], + "title": "CancelBatchCompletionsV2Response" + }, + "CancelFineTuneResponse": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "CancelFineTuneResponse" + }, + "ChatCompletionFunctionCallOption": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to call." + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ChatCompletionFunctionCallOption" + }, + "ChatCompletionFunctions": { + "properties": { + "description": { + "title": "Description", + "description": "A description of what the function does, used by the model to choose when and how to call the function.", + "type": "string", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." + }, + "parameters": { + "$ref": "#/components/schemas/FunctionParameters", + "nullable": true + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ChatCompletionFunctions" + }, + "ChatCompletionMessageToolCall": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "The ID of the tool call." + }, + "type": { + "type": "string", + "title": "Type", + "description": "The type of the tool. Currently, only `function` is supported.", + "enum": [ + "function" + ] + }, + "function": { + "$ref": "#/components/schemas/Function1", + "description": "The function that the model called." 
+ } + }, + "type": "object", + "required": [ + "id", + "type", + "function" + ], + "title": "ChatCompletionMessageToolCall" + }, + "ChatCompletionMessageToolCallChunk": { + "properties": { + "index": { + "type": "integer", + "title": "Index" + }, + "id": { + "title": "Id", + "description": "The ID of the tool call.", + "type": "string", + "nullable": true + }, + "type": { + "title": "Type", + "description": "The type of the tool. Currently, only `function` is supported.", + "type": "string", + "nullable": true, + "enum": [ + "function" + ] + }, + "function": { + "$ref": "#/components/schemas/Function2", + "nullable": true + } + }, + "type": "object", + "required": [ + "index" + ], + "title": "ChatCompletionMessageToolCallChunk" + }, + "ChatCompletionMessageToolCalls-Input": { + "items": { + "$ref": "#/components/schemas/ChatCompletionMessageToolCall" + }, + "type": "array", + "title": "ChatCompletionMessageToolCalls", + "description": "The tool calls generated by the model, such as function calls." + }, + "ChatCompletionMessageToolCalls-Output": { + "items": { + "$ref": "#/components/schemas/ChatCompletionMessageToolCall" + }, + "type": "array", + "title": "ChatCompletionMessageToolCalls", + "description": "The tool calls generated by the model, such as function calls." + }, + "ChatCompletionNamedToolChoice": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the tool. Currently, only `function` is supported.", + "enum": [ + "function" + ] + }, + "function": { + "$ref": "#/components/schemas/Function3" + } + }, + "type": "object", + "required": [ + "type", + "function" + ], + "title": "ChatCompletionNamedToolChoice" + }, + "ChatCompletionRequestAssistantMessage": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/Content" + } + ], + "title": "Content", + "description": "The contents of the assistant message. 
Required unless `tool_calls` or `function_call` is specified.\n", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "description": "The refusal message by the assistant.", + "type": "string", + "nullable": true + }, + "role": { + "type": "string", + "title": "Role", + "description": "The role of the messages author, in this case `assistant`.", + "enum": [ + "assistant" + ] + }, + "name": { + "title": "Name", + "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", + "type": "string", + "nullable": true + }, + "audio": { + "description": "Data about a previous audio response from the model. \n[Learn more](/docs/guides/audio).\n", + "$ref": "#/components/schemas/Audio", + "nullable": true + }, + "tool_calls": { + "$ref": "#/components/schemas/ChatCompletionMessageToolCalls-Input", + "nullable": true + }, + "function_call": { + "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", + "$ref": "#/components/schemas/FunctionCall", + "nullable": true + } + }, + "type": "object", + "required": [ + "role" + ], + "title": "ChatCompletionRequestAssistantMessage" + }, + "ChatCompletionRequestAssistantMessageContentPart": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartRefusal" + } + ], + "title": "ChatCompletionRequestAssistantMessageContentPart" + }, + "ChatCompletionRequestDeveloperMessage": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/Content1" + } + ], + "title": "Content", + "description": "The contents of the developer message." 
+ }, + "role": { + "type": "string", + "title": "Role", + "description": "The role of the messages author, in this case `developer`.", + "enum": [ + "developer" + ] + }, + "name": { + "title": "Name", + "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "ChatCompletionRequestDeveloperMessage" + }, + "ChatCompletionRequestFunctionMessage": { + "properties": { + "role": { + "type": "string", + "title": "Role", + "description": "The role of the messages author, in this case `function`.", + "enum": [ + "function" + ] + }, + "content": { + "title": "Content", + "description": "The contents of the function message.", + "type": "string", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to call." + } + }, + "type": "object", + "required": [ + "role", + "name" + ], + "title": "ChatCompletionRequestFunctionMessage" + }, + "ChatCompletionRequestMessage": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionRequestDeveloperMessage" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestSystemMessage" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestUserMessage" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestAssistantMessage" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestToolMessage" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestFunctionMessage" + } + ], + "title": "ChatCompletionRequestMessage" + }, + "ChatCompletionRequestMessageContentPartAudio": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the content part. 
Always `input_audio`.", + "enum": [ + "input_audio" + ] + }, + "input_audio": { + "$ref": "#/components/schemas/InputAudio" + } + }, + "type": "object", + "required": [ + "type", + "input_audio" + ], + "title": "ChatCompletionRequestMessageContentPartAudio" + }, + "ChatCompletionRequestMessageContentPartFile": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the content part. Always `file`.", + "enum": [ + "file" + ] + }, + "file": { + "$ref": "#/components/schemas/File" + } + }, + "type": "object", + "required": [ + "type", + "file" + ], + "title": "ChatCompletionRequestMessageContentPartFile" + }, + "ChatCompletionRequestMessageContentPartImage": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the content part.", + "enum": [ + "image_url" + ] + }, + "image_url": { + "$ref": "#/components/schemas/ImageUrl" + } + }, + "type": "object", + "required": [ + "type", + "image_url" + ], + "title": "ChatCompletionRequestMessageContentPartImage" + }, + "ChatCompletionRequestMessageContentPartRefusal": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the content part.", + "enum": [ + "refusal" + ] + }, + "refusal": { + "type": "string", + "title": "Refusal", + "description": "The refusal message generated by the model." + } + }, + "type": "object", + "required": [ + "type", + "refusal" + ], + "title": "ChatCompletionRequestMessageContentPartRefusal" + }, + "ChatCompletionRequestMessageContentPartText": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the content part.", + "enum": [ + "text" + ] + }, + "text": { + "type": "string", + "title": "Text", + "description": "The text content." 
+ } + }, + "type": "object", + "required": [ + "type", + "text" + ], + "title": "ChatCompletionRequestMessageContentPartText" + }, + "ChatCompletionRequestSystemMessage": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/Content2" + } + ], + "title": "Content", + "description": "The contents of the system message." + }, + "role": { + "type": "string", + "title": "Role", + "description": "The role of the messages author, in this case `system`.", + "enum": [ + "system" + ] + }, + "name": { + "title": "Name", + "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "ChatCompletionRequestSystemMessage" + }, + "ChatCompletionRequestSystemMessageContentPart": { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText", + "title": "ChatCompletionRequestSystemMessageContentPart" + }, + "ChatCompletionRequestToolMessage": { + "properties": { + "role": { + "type": "string", + "title": "Role", + "description": "The role of the messages author, in this case `tool`.", + "enum": [ + "tool" + ] + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/Content3" + } + ], + "title": "Content", + "description": "The contents of the tool message." + }, + "tool_call_id": { + "type": "string", + "title": "Tool Call Id", + "description": "Tool call that this message is responding to." 
+ } + }, + "type": "object", + "required": [ + "role", + "content", + "tool_call_id" + ], + "title": "ChatCompletionRequestToolMessage" + }, + "ChatCompletionRequestToolMessageContentPart": { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText", + "title": "ChatCompletionRequestToolMessageContentPart" + }, + "ChatCompletionRequestUserMessage": { + "properties": { + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/Content4" + } + ], + "title": "Content", + "description": "The contents of the user message.\n" + }, + "role": { + "type": "string", + "title": "Role", + "description": "The role of the messages author, in this case `user`.", + "enum": [ + "user" + ] + }, + "name": { + "title": "Name", + "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "content", + "role" + ], + "title": "ChatCompletionRequestUserMessage" + }, + "ChatCompletionRequestUserMessageContentPart": { + "anyOf": [ + { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartAudio" + }, + { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartFile" + } + ], + "title": "ChatCompletionRequestUserMessageContentPart" + }, + "ChatCompletionResponseMessage": { + "properties": { + "content": { + "title": "Content", + "description": "The contents of the message.", + "type": "string", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "description": "The refusal message generated by the model.", + "type": "string", + "nullable": true + }, + "tool_calls": { + "$ref": "#/components/schemas/ChatCompletionMessageToolCalls-Output", + "nullable": true + }, + 
"annotations": { + "title": "Annotations", + "description": "Annotations for the message, when applicable, as when using the\n[web search tool](/docs/guides/tools-web-search?api-mode=chat).\n", + "items": { + "$ref": "#/components/schemas/Annotation" + }, + "type": "array", + "nullable": true + }, + "role": { + "type": "string", + "title": "Role", + "description": "The role of the author of this message.", + "enum": [ + "assistant" + ] + }, + "function_call": { + "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", + "$ref": "#/components/schemas/FunctionCall", + "nullable": true + }, + "audio": { + "description": "If the audio output modality is requested, this object contains data\nabout the audio response from the model. [Learn more](/docs/guides/audio).\n", + "$ref": "#/components/schemas/Audio1", + "nullable": true + } + }, + "type": "object", + "required": [ + "role" + ], + "title": "ChatCompletionResponseMessage" + }, + "ChatCompletionStreamOptions": { + "properties": { + "include_usage": { + "title": "Include Usage", + "description": "If set, an additional chunk will be streamed before the `data: [DONE]`\nmessage. The `usage` field on this chunk shows the token usage statistics\nfor the entire request, and the `choices` field will always be an empty\narray. \n\nAll other chunks will also include a `usage` field, but with a null\nvalue. **NOTE:** If the stream is interrupted, you may not receive the\nfinal usage chunk which contains the total token usage for the request.\n", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "title": "ChatCompletionStreamOptions" + }, + "ChatCompletionStreamResponseDelta": { + "properties": { + "content": { + "title": "Content", + "description": "The contents of the chunk message.", + "type": "string", + "nullable": true + }, + "function_call": { + "description": "Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.", + "$ref": "#/components/schemas/FunctionCall2", + "nullable": true + }, + "tool_calls": { + "title": "Tool Calls", + "items": { + "$ref": "#/components/schemas/ChatCompletionMessageToolCallChunk" + }, + "type": "array", + "nullable": true + }, + "role": { + "title": "Role", + "description": "The role of the author of this message.", + "type": "string", + "enum": [ + "developer", + "system", + "user", + "assistant", + "tool" + ], + "nullable": true + }, + "refusal": { + "title": "Refusal", + "description": "The refusal message generated by the model.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "ChatCompletionStreamResponseDelta" + }, + "ChatCompletionTokenLogprob": { + "properties": { + "token": { + "type": "string", + "title": "Token", + "description": "The token." + }, + "logprob": { + "type": "number", + "title": "Logprob", + "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." + }, + "bytes": { + "title": "Bytes", + "description": "A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "top_logprobs": { + "items": { + "$ref": "#/components/schemas/TopLogprob" + }, + "type": "array", + "title": "Top Logprobs", + "description": "List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned." 
+ } + }, + "type": "object", + "required": [ + "token", + "logprob", + "bytes", + "top_logprobs" + ], + "title": "ChatCompletionTokenLogprob" + }, + "ChatCompletionTool": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the tool. Currently, only `function` is supported.", + "enum": [ + "function" + ] + }, + "function": { + "$ref": "#/components/schemas/FunctionObject" + } + }, + "type": "object", + "required": [ + "type", + "function" + ], + "title": "ChatCompletionTool" + }, + "ChatCompletionToolChoiceOption": { + "anyOf": [ + { + "type": "string", + "enum": [ + "none", + "auto", + "required" + ] + }, + { + "$ref": "#/components/schemas/ChatCompletionNamedToolChoice" + } + ], + "title": "ChatCompletionToolChoiceOption", + "description": "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present.\n" + }, + "ChatCompletionV2Request": { + "properties": { + "best_of": { + "title": "Best Of", + "description": "Number of output sequences that are generated from the prompt.\n From these `best_of` sequences, the top `n` sequences are returned.\n `best_of` must be greater than or equal to `n`. This is treated as\n the beam width when `use_beam_search` is True. By default, `best_of`\n is set to `n`.", + "type": "integer", + "nullable": true + }, + "top_k": { + "title": "Top K", + "description": "Controls the number of top tokens to consider. 
-1 means consider all tokens.", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "min_p": { + "title": "Min P", + "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. Set to 0 to disable this.", + "type": "number", + "nullable": true + }, + "use_beam_search": { + "title": "Use Beam Search", + "description": "Whether to use beam search for sampling.", + "type": "boolean", + "nullable": true + }, + "length_penalty": { + "title": "Length Penalty", + "description": "Float that penalizes sequences based on their length.\n Used in beam search.", + "type": "number", + "nullable": true + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", + "type": "number", + "nullable": true + }, + "early_stopping": { + "title": "Early Stopping", + "description": "Controls the stopping condition for beam search. It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where a\n heuristic is applied and the generation stops when it is very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", + "type": "boolean", + "nullable": true + }, + "stop_token_ids": { + "title": "Stop Token Ids", + "description": "List of tokens that stop the generation when they are\n generated. 
The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "include_stop_str_in_output": { + "title": "Include Stop Str In Output", + "description": "Whether to include the stop strings in\n output text. Defaults to False.", + "type": "boolean", + "nullable": true + }, + "ignore_eos": { + "title": "Ignore Eos", + "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", + "type": "boolean", + "nullable": true + }, + "min_tokens": { + "title": "Min Tokens", + "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", + "type": "integer", + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "description": "Whether to skip special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "spaces_between_special_tokens": { + "title": "Spaces Between Special Tokens", + "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "echo": { + "title": "Echo", + "description": "If true, the new message will be prepended with the last message if they belong to the same role.", + "type": "boolean", + "nullable": true + }, + "add_generation_prompt": { + "title": "Add Generation Prompt", + "description": "If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model.", + "type": "boolean", + "nullable": true + }, + "continue_final_message": { + "title": "Continue Final Message", + "description": "If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. 
The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`.", + "type": "boolean", + "nullable": true + }, + "add_special_tokens": { + "title": "Add Special Tokens", + "description": "If true, special tokens (e.g. BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default).", + "type": "boolean", + "nullable": true + }, + "documents": { + "title": "Documents", + "description": "A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys.", + "items": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "type": "array", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given", + "type": "string", + "nullable": true + }, + "chat_template_kwargs": { + "title": "Chat Template Kwargs", + "description": "Additional kwargs to pass to the template renderer. Will be accessible by the chat template.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_json": { + "title": "Guided Json", + "description": "JSON schema for guided decoding. Only supported in vllm.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_regex": { + "title": "Guided Regex", + "description": "Regex for guided decoding. 
Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_choice": { + "title": "Guided Choice", + "description": "Choices for guided decoding. Only supported in vllm.", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "guided_grammar": { + "title": "Guided Grammar", + "description": "Context-free grammar for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_decoding_backend": { + "title": "Guided Decoding Backend", + "description": "If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer'", + "type": "string", + "nullable": true + }, + "guided_whitespace_pattern": { + "title": "Guided Whitespace Pattern", + "description": "If specified, will override the default whitespace pattern for guided json decoding.", + "type": "string", + "nullable": true + }, + "priority": { + "title": "Priority", + "description": "The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.", + "type": "integer", + "nullable": true + }, + "metadata": { + "$ref": "#/components/schemas/Metadata", + "nullable": true + }, + "temperature": { + "title": "Temperature", + "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.\n", + "default": 1, + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "top_p": { + "title": "Top P", + "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. 
So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", + "default": 1, + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "user": { + "title": "User", + "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", + "type": "string", + "nullable": true, + "example": "user-1234" + }, + "service_tier": { + "$ref": "#/components/schemas/ServiceTier", + "nullable": true + }, + "messages": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestMessage" + }, + "type": "array", + "minItems": 1, + "title": "Messages", + "description": "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n" + }, + "model": { + "type": "string", + "title": "Model", + "description": "ID of the model to use.", + "example": "mixtral-8x7b-instruct" + }, + "modalities": { + "$ref": "#/components/schemas/ResponseModalities", + "nullable": true + }, + "reasoning_effort": { + "$ref": "#/components/schemas/ReasoningEffort", + "nullable": true + }, + "max_completion_tokens": { + "title": "Max Completion Tokens", + "description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n", + "type": "integer", + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "presence_penalty": { + "title": "Presence Penalty", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "web_search_options": { + "title": "Web search", + "description": "This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).\n", + "$ref": "#/components/schemas/WebSearchOptions", + "nullable": true + }, + "top_logprobs": { + "title": "Top Logprobs", + "description": "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n", + "type": "integer", + "maximum": 20.0, + "minimum": 0.0, + "nullable": true + }, + "response_format": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResponseFormatText" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonSchema" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonObject" + } + ], + "title": "Response Format", + "description": "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. 
Using `json_schema`\nis preferred for models that support it.\n", + "nullable": true + }, + "audio": { + "description": "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](/docs/guides/audio).\n", + "$ref": "#/components/schemas/Audio2", + "nullable": true + }, + "store": { + "title": "Store", + "description": "Whether or not to store the output of this chat completion request for \nuse in our [model distillation](/docs/guides/distillation) or\n[evals](/docs/guides/evals) products.\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "stream": { + "title": "Stream", + "description": "If set, partial message deltas will be sent. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "stop": { + "$ref": "#/components/schemas/StopConfiguration", + "nullable": true + }, + "logit_bias": { + "title": "Logit Bias", + "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. 
Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.\n", + "additionalProperties": { + "type": "integer" + }, + "type": "object", + "nullable": true + }, + "logprobs": { + "title": "Logprobs", + "description": "Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of [tokens](/tokenizer) that can be generated in the\nchat completion. This value can be used to control\n[costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o-series models](/docs/guides/reasoning).\n", + "type": "integer", + "nullable": true + }, + "n": { + "title": "N", + "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.", + "default": 1, + "type": "integer", + "maximum": 128.0, + "minimum": 1.0, + "nullable": true, + "example": 1 + }, + "prediction": { + "description": "Configuration for a [Predicted Output](/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. 
This is most common when you are\nregenerating a file with only minor changes to most of the content.\n", + "$ref": "#/components/schemas/PredictionContent", + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", + "type": "integer", + "maximum": 9.223372036854776e+18, + "minimum": -9.223372036854776e+18, + "nullable": true + }, + "stream_options": { + "$ref": "#/components/schemas/ChatCompletionStreamOptions", + "nullable": true + }, + "tools": { + "title": "Tools", + "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. 
A max of 128 functions are supported.\n", + "items": { + "$ref": "#/components/schemas/ChatCompletionTool" + }, + "type": "array", + "nullable": true + }, + "tool_choice": { + "$ref": "#/components/schemas/ChatCompletionToolChoiceOption", + "nullable": true + }, + "parallel_tool_calls": { + "$ref": "#/components/schemas/ParallelToolCalls", + "nullable": true + }, + "function_call": { + "anyOf": [ + { + "type": "string", + "enum": [ + "none", + "auto" + ] + }, + { + "$ref": "#/components/schemas/ChatCompletionFunctionCallOption" + } + ], + "title": "Function Call", + "description": "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. 
`auto` is the default\nif functions are present.\n", + "nullable": true + }, + "functions": { + "title": "Functions", + "description": "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n", + "items": { + "$ref": "#/components/schemas/ChatCompletionFunctions" + }, + "type": "array", + "maxItems": 128, + "minItems": 1, + "nullable": true + } + }, + "type": "object", + "required": [ + "messages", + "model" + ], + "title": "ChatCompletionV2Request" + }, + "ChatCompletionV2StreamErrorChunk": { + "properties": { + "error": { + "$ref": "#/components/schemas/StreamError" + } + }, + "type": "object", + "required": [ + "error" + ], + "title": "ChatCompletionV2StreamErrorChunk" + }, + "Choice": { + "properties": { + "finish_reason": { + "type": "string", + "enum": [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call" + ], + "title": "Finish Reason", + "description": "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n" + }, + "index": { + "type": "integer", + "title": "Index", + "description": "The index of the choice in the list of choices." 
+ }, + "message": { + "$ref": "#/components/schemas/ChatCompletionResponseMessage" + }, + "logprobs": { + "description": "Log probability information for the choice.", + "$ref": "#/components/schemas/Logprobs", + "nullable": true + } + }, + "type": "object", + "required": [ + "finish_reason", + "index", + "message", + "logprobs" + ], + "title": "Choice" + }, + "Choice1": { + "properties": { + "delta": { + "$ref": "#/components/schemas/ChatCompletionStreamResponseDelta" + }, + "logprobs": { + "description": "Log probability information for the choice.", + "$ref": "#/components/schemas/Logprobs", + "nullable": true + }, + "finish_reason": { + "title": "Finish Reason", + "description": "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.\n", + "type": "string", + "enum": [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call" + ], + "nullable": true + }, + "index": { + "type": "integer", + "title": "Index", + "description": "The index of the choice in the list of choices." + } + }, + "type": "object", + "required": [ + "delta", + "finish_reason", + "index" + ], + "title": "Choice1" + }, + "Choice2": { + "properties": { + "finish_reason": { + "type": "string", + "enum": [ + "stop", + "length", + "content_filter" + ], + "title": "Finish Reason", + "description": "The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\nor `content_filter` if content was omitted due to a flag from our content filters.\n" + }, + "index": { + "type": "integer", + "title": "Index" + }, + "logprobs": { + "$ref": "#/components/schemas/Logprobs2", + "nullable": true + }, + "text": { + "type": "string", + "title": "Text" + } + }, + "type": "object", + "required": [ + "finish_reason", + "index", + "logprobs", + "text" + ], + "title": "Choice2" + }, + "CloneModelBundleV1Request": { + "properties": { + "original_model_bundle_id": { + "type": "string", + "title": "Original Model Bundle Id" + }, + "new_app_config": { + "title": "New App Config", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "type": "object", + "required": [ + "original_model_bundle_id" + ], + "title": "CloneModelBundleV1Request", + "description": "Request object for cloning a Model Bundle from another one." + }, + "CloneModelBundleV2Request": { + "properties": { + "original_model_bundle_id": { + "type": "string", + "title": "Original Model Bundle Id" + }, + "new_app_config": { + "title": "New App Config", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "type": "object", + "required": [ + "original_model_bundle_id" + ], + "title": "CloneModelBundleV2Request", + "description": "Request object for cloning a Model Bundle from another one." 
+ }, + "CloudpickleArtifactFlavor": { + "properties": { + "requirements": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Requirements" + }, + "framework": { + "oneOf": [ + { + "$ref": "#/components/schemas/PytorchFramework" + }, + { + "$ref": "#/components/schemas/TensorflowFramework" + }, + { + "$ref": "#/components/schemas/CustomFramework" + } + ], + "title": "Framework", + "discriminator": { + "propertyName": "framework_type", + "mapping": { + "custom_base_image": "#/components/schemas/CustomFramework", + "pytorch": "#/components/schemas/PytorchFramework", + "tensorflow": "#/components/schemas/TensorflowFramework" + } + } + }, + "app_config": { + "title": "App Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "location": { + "type": "string", + "title": "Location" + }, + "flavor": { + "type": "string", + "title": "Flavor", + "enum": [ + "cloudpickle_artifact" + ] + }, + "load_predict_fn": { + "type": "string", + "title": "Load Predict Fn" + }, + "load_model_fn": { + "type": "string", + "title": "Load Model Fn" + } + }, + "type": "object", + "required": [ + "requirements", + "framework", + "location", + "flavor", + "load_predict_fn", + "load_model_fn" + ], + "title": "CloudpickleArtifactFlavor", + "description": "This is the entity-layer class for the Model Bundle flavor of a cloudpickle artifact." 
+ }, + "CompletionOutput": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "num_prompt_tokens": { + "title": "Num Prompt Tokens", + "type": "integer", + "nullable": true + }, + "num_completion_tokens": { + "type": "integer", + "title": "Num Completion Tokens" + }, + "tokens": { + "title": "Tokens", + "items": { + "$ref": "#/components/schemas/TokenOutput" + }, + "type": "array", + "nullable": true + } + }, + "type": "object", + "required": [ + "text", + "num_completion_tokens" + ], + "title": "CompletionOutput", + "description": "Represents the output of a completion request to a model." + }, + "CompletionStreamOutput": { + "properties": { + "text": { + "type": "string", + "title": "Text" + }, + "finished": { + "type": "boolean", + "title": "Finished" + }, + "num_prompt_tokens": { + "title": "Num Prompt Tokens", + "type": "integer", + "nullable": true + }, + "num_completion_tokens": { + "title": "Num Completion Tokens", + "type": "integer", + "nullable": true + }, + "token": { + "$ref": "#/components/schemas/TokenOutput", + "nullable": true + } + }, + "type": "object", + "required": [ + "text", + "finished" + ], + "title": "CompletionStreamOutput" + }, + "CompletionStreamV1Request": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt" + }, + "max_new_tokens": { + "type": "integer", + "title": "Max New Tokens" + }, + "temperature": { + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "title": "Temperature" + }, + "stop_sequences": { + "title": "Stop Sequences", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "return_token_log_probs": { + "title": "Return Token Log Probs", + "default": false, + "type": "boolean", + "nullable": true + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number", + "maximum": 2.0, + "minimum": 
0.0, + "nullable": true + }, + "top_k": { + "title": "Top K", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "top_p": { + "title": "Top P", + "type": "number", + "maximum": 1.0, + "exclusiveMinimum": 0.0, + "nullable": true + }, + "include_stop_str_in_output": { + "title": "Include Stop Str In Output", + "type": "boolean", + "nullable": true + }, + "guided_json": { + "title": "Guided Json", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_regex": { + "title": "Guided Regex", + "type": "string", + "nullable": true + }, + "guided_choice": { + "title": "Guided Choice", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "guided_grammar": { + "title": "Guided Grammar", + "type": "string", + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "default": true, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "prompt", + "max_new_tokens", + "temperature" + ], + "title": "CompletionStreamV1Request", + "description": "Request object for a stream prompt completion task." + }, + "CompletionStreamV1Response": { + "properties": { + "request_id": { + "title": "Request Id", + "type": "string", + "nullable": true + }, + "output": { + "$ref": "#/components/schemas/CompletionStreamOutput", + "nullable": true + }, + "error": { + "$ref": "#/components/schemas/StreamError", + "nullable": true + } + }, + "type": "object", + "required": [ + "request_id" + ], + "title": "CompletionStreamV1Response", + "description": "Response object for a stream prompt completion task." 
+ }, + "CompletionSyncV1Request": { + "properties": { + "prompt": { + "type": "string", + "title": "Prompt" + }, + "max_new_tokens": { + "type": "integer", + "title": "Max New Tokens" + }, + "temperature": { + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "title": "Temperature" + }, + "stop_sequences": { + "title": "Stop Sequences", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "return_token_log_probs": { + "title": "Return Token Log Probs", + "default": false, + "type": "boolean", + "nullable": true + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true + }, + "top_k": { + "title": "Top K", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "top_p": { + "title": "Top P", + "type": "number", + "maximum": 1.0, + "exclusiveMinimum": 0.0, + "nullable": true + }, + "include_stop_str_in_output": { + "title": "Include Stop Str In Output", + "type": "boolean", + "nullable": true + }, + "guided_json": { + "title": "Guided Json", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_regex": { + "title": "Guided Regex", + "type": "string", + "nullable": true + }, + "guided_choice": { + "title": "Guided Choice", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "guided_grammar": { + "title": "Guided Grammar", + "type": "string", + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "default": true, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "prompt", + "max_new_tokens", + "temperature" + ], + "title": "CompletionSyncV1Request", + "description": "Request object for a synchronous prompt completion task." 
+ }, + "CompletionSyncV1Response": { + "properties": { + "request_id": { + "title": "Request Id", + "type": "string", + "nullable": true + }, + "output": { + "$ref": "#/components/schemas/CompletionOutput", + "nullable": true + } + }, + "type": "object", + "title": "CompletionSyncV1Response", + "description": "Response object for a synchronous prompt completion." + }, + "CompletionTokensDetails": { + "properties": { + "accepted_prediction_tokens": { + "type": "integer", + "title": "Accepted Prediction Tokens", + "description": "When using Predicted Outputs, the number of tokens in the\nprediction that appeared in the completion.\n", + "default": 0 + }, + "audio_tokens": { + "type": "integer", + "title": "Audio Tokens", + "description": "Audio input tokens generated by the model.", + "default": 0 + }, + "reasoning_tokens": { + "type": "integer", + "title": "Reasoning Tokens", + "description": "Tokens generated by the model for reasoning.", + "default": 0 + }, + "rejected_prediction_tokens": { + "type": "integer", + "title": "Rejected Prediction Tokens", + "description": "When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context window\nlimits.\n", + "default": 0 + } + }, + "type": "object", + "title": "CompletionTokensDetails" + }, + "CompletionUsage": { + "properties": { + "completion_tokens": { + "type": "integer", + "title": "Completion Tokens", + "description": "Number of tokens in the generated completion." + }, + "prompt_tokens": { + "type": "integer", + "title": "Prompt Tokens", + "description": "Number of tokens in the prompt." + }, + "total_tokens": { + "type": "integer", + "title": "Total Tokens", + "description": "Total number of tokens used in the request (prompt + completion)." 
+ }, + "completion_tokens_details": { + "description": "Breakdown of tokens used in a completion.", + "$ref": "#/components/schemas/CompletionTokensDetails", + "nullable": true + }, + "prompt_tokens_details": { + "description": "Breakdown of tokens used in the prompt.", + "$ref": "#/components/schemas/PromptTokensDetails", + "nullable": true + } + }, + "type": "object", + "required": [ + "completion_tokens", + "prompt_tokens", + "total_tokens" + ], + "title": "CompletionUsage" + }, + "CompletionV2Request": { + "properties": { + "best_of": { + "title": "Best Of", + "description": "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return \u2013 `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", + "default": 1, + "type": "integer", + "maximum": 20.0, + "minimum": 0.0, + "nullable": true + }, + "top_k": { + "title": "Top K", + "description": "Controls the number of top tokens to consider. -1 means consider all tokens.", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "min_p": { + "title": "Min P", + "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. 
Set to 0 to disable this.", + "type": "number", + "nullable": true + }, + "use_beam_search": { + "title": "Use Beam Search", + "description": "Whether to use beam search for sampling.", + "type": "boolean", + "nullable": true + }, + "length_penalty": { + "title": "Length Penalty", + "description": "Float that penalizes sequences based on their length.\n Used in beam search.", + "type": "number", + "nullable": true + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", + "type": "number", + "nullable": true + }, + "early_stopping": { + "title": "Early Stopping", + "description": "Controls the stopping condition for beam search. It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", + "type": "boolean", + "nullable": true + }, + "stop_token_ids": { + "title": "Stop Token Ids", + "description": "List of tokens that stop the generation when they are\n generated. 
The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "include_stop_str_in_output": { + "title": "Include Stop Str In Output", + "description": "Whether to include the stop strings in output text.", + "type": "boolean", + "nullable": true + }, + "ignore_eos": { + "title": "Ignore Eos", + "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", + "type": "boolean", + "nullable": true + }, + "min_tokens": { + "title": "Min Tokens", + "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", + "type": "integer", + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "description": "Whether to skip special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "spaces_between_special_tokens": { + "title": "Spaces Between Special Tokens", + "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "add_special_tokens": { + "title": "Add Special Tokens", + "description": "If true (the default), special tokens (e.g. BOS) will be added to the prompt.", + "type": "boolean", + "nullable": true + }, + "response_format": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResponseFormatText" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonSchema" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonObject" + } + ], + "title": "Response Format", + "description": "Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'} or {'type': 'text' } is supported.", + "nullable": true + }, + "guided_json": { + "title": "Guided Json", + "description": "JSON schema for guided decoding. 
Only supported in vllm.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_regex": { + "title": "Guided Regex", + "description": "Regex for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_choice": { + "title": "Guided Choice", + "description": "Choices for guided decoding. Only supported in vllm.", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "guided_grammar": { + "title": "Guided Grammar", + "description": "Context-free grammar for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_decoding_backend": { + "title": "Guided Decoding Backend", + "description": "If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer'", + "type": "string", + "nullable": true + }, + "guided_whitespace_pattern": { + "title": "Guided Whitespace Pattern", + "description": "If specified, will override the default whitespace pattern for guided json decoding.", + "type": "string", + "nullable": true + }, + "model": { + "type": "string", + "title": "Model", + "description": "ID of the model to use.", + "example": "mixtral-8x7b-instruct" + }, + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/Prompt" + }, + { + "$ref": "#/components/schemas/Prompt1" + } + ], + "title": "Prompt", + "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n", + "nullable": true + }, + "echo": { + "title": "Echo", + "description": "Echo back the prompt in addition to the 
completion\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "logit_bias": { + "title": "Logit Bias", + "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n", + "additionalProperties": { + "type": "integer" + }, + "type": "object", + "nullable": true + }, + "logprobs": { + "title": "Logprobs", + "description": "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. 
The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n", + "type": "integer", + "maximum": 5.0, + "minimum": 0.0, + "nullable": true + }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n", + "default": 16, + "type": "integer", + "minimum": 0.0, + "nullable": true, + "example": 16 + }, + "n": { + "title": "N", + "description": "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", + "default": 1, + "type": "integer", + "maximum": 128.0, + "minimum": 1.0, + "nullable": true, + "example": 1 + }, + "presence_penalty": { + "title": "Presence Penalty", + "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", + "type": "integer", + "nullable": true + }, + "stop": { + "$ref": "#/components/schemas/StopConfiguration", + "nullable": true + }, + "stream": { + "title": "Stream", + "description": "If set, partial message deltas will be sent. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "stream_options": { + "$ref": "#/components/schemas/ChatCompletionStreamOptions", + "nullable": true + }, + "suffix": { + "title": "Suffix", + "description": "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n", + "type": "string", + "nullable": true, + "example": "test." + }, + "temperature": { + "title": "Temperature", + "description": "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n", + "default": 1, + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "top_p": { + "title": "Top P", + "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", + "default": 1, + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "user": { + "title": "User", + "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", + "type": "string", + "nullable": true, + "example": "user-1234" + } + }, + "type": "object", + "required": [ + "model", + "prompt" + ], + "title": "CompletionV2Request" + }, + "CompletionV2StreamErrorChunk": { + "properties": { + "error": { + "$ref": "#/components/schemas/StreamError" + } + }, + "type": "object", + "required": [ + "error" + ], + "title": "CompletionV2StreamErrorChunk" + }, + "Content": { + "title": "Content", + "description": "An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.", + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestAssistantMessageContentPart" + }, + "type": "array", + "minItems": 1, + "nullable": true + }, + "Content1": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" + }, + "type": "array", + "minItems": 1, + "title": "Content1", + "description": "An array of content parts with a defined type. 
For developer messages, only type `text` is supported." + }, + "Content2": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestSystemMessageContentPart" + }, + "type": "array", + "minItems": 1, + "title": "Content2", + "description": "An array of content parts with a defined type. For system messages, only type `text` is supported." + }, + "Content3": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestToolMessageContentPart" + }, + "type": "array", + "minItems": 1, + "title": "Content3", + "description": "An array of content parts with a defined type. For tool messages, only type `text` is supported." + }, + "Content4": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestUserMessageContentPart" + }, + "type": "array", + "minItems": 1, + "title": "Content4", + "description": "An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text, image, or audio inputs." + }, + "Content8": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestMessageContentPartText" + }, + "type": "array", + "minItems": 1, + "title": "Content8", + "description": "An array of content parts with a defined type. Supported options differ based on the [model](/docs/models) being used to generate the response. Can contain text inputs." 
+ }, + "CreateAsyncTaskV1Response": { + "properties": { + "task_id": { + "type": "string", + "title": "Task Id" + } + }, + "type": "object", + "required": [ + "task_id" + ], + "title": "CreateAsyncTaskV1Response" + }, + "CreateBatchCompletionsV1ModelConfig": { + "properties": { + "max_model_len": { + "title": "Max Model Len", + "description": "Model context length. If unspecified, will be automatically derived from the model config", + "type": "integer", + "nullable": true + }, + "max_num_seqs": { + "title": "Max Num Seqs", + "description": "Maximum number of sequences per iteration", + "type": "integer", + "nullable": true + }, + "enforce_eager": { + "title": "Enforce Eager", + "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility", + "type": "boolean", + "nullable": true + }, + "trust_remote_code": { + "title": "Trust Remote Code", + "description": "Whether to trust remote code from Hugging Face Hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", + "default": false, + "type": "boolean", + "nullable": true + }, + "pipeline_parallel_size": { + "title": "Pipeline Parallel Size", + "description": "Number of pipeline stages. Default to None.", + "type": "integer", + "nullable": true + }, + "tensor_parallel_size": { + "title": "Tensor Parallel Size", + "description": "Number of tensor parallel replicas. Default to None.", + "type": "integer", + "nullable": true + }, + "quantization": { + "title": "Quantization", + "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. 
If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", + "type": "string", + "nullable": true + }, + "disable_log_requests": { + "title": "Disable Log Requests", + "description": "Disable logging requests. Default to None.", + "type": "boolean", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "tool_call_parser": { + "title": "Tool Call Parser", + "description": "Tool call parser", + "type": "string", + "nullable": true + }, + "enable_auto_tool_choice": { + "title": "Enable Auto Tool Choice", + "description": "Enable auto tool choice", + "type": "boolean", + "nullable": true + }, + "load_format": { + "title": "Load Format", + "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", + "type": "string", + "nullable": true + }, + "config_format": { + "title": "Config Format", + "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", + "type": "string", + "nullable": true + }, + "tokenizer_mode": { + "title": "Tokenizer Mode", + "description": "Tokenizer mode. 
'auto' will use the fast tokenizer if available, 'slow' will always use the slow tokenizer, and 'mistral' will always use the tokenizer from `mistral_common`.", + "type": "string", + "nullable": true + }, + "limit_mm_per_prompt": { + "title": "Limit Mm Per Prompt", + "description": "Maximum number of data instances per modality per prompt. Only applicable for multimodal models.", + "type": "string", + "nullable": true + }, + "max_num_batched_tokens": { + "title": "Max Num Batched Tokens", + "description": "Maximum number of batched tokens per iteration", + "type": "integer", + "nullable": true + }, + "tokenizer": { + "title": "Tokenizer", + "description": "Name or path of the huggingface tokenizer to use.", + "type": "string", + "nullable": true + }, + "dtype": { + "title": "Dtype", + "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", + "type": "string", + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "Random seed for the model.", + "type": "integer", + "nullable": true + }, + "revision": { + "title": "Revision", + "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "code_revision": { + "title": "Code Revision", + "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "rope_scaling": { + "title": "Rope Scaling", + "description": "Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "tokenizer_revision": { + "title": "Tokenizer Revision", + "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "quantization_param_path": { + "title": "Quantization Param Path", + "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", + "type": "string", + "nullable": true + }, + "max_seq_len_to_capture": { + "title": "Max Seq Len To Capture", + "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", + "type": "integer", + "nullable": true + }, + "disable_sliding_window": { + "title": "Disable Sliding Window", + "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", + "type": "boolean", + "nullable": true + }, + "skip_tokenizer_init": { + "title": "Skip Tokenizer Init", + "description": "If true, skip initialization of tokenizer and detokenizer.", + "type": "boolean", + "nullable": true + }, + "served_model_name": { + "title": "Served Model Name", + "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", + "type": "string", + "nullable": true + }, + "override_neuron_config": { + "title": "Override Neuron Config", + "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "mm_processor_kwargs": { + "title": "Mm Processor Kwargs", + "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "block_size": { + "title": "Block Size", + "description": "Size of a cache block in number of tokens.", + "type": "integer", + "nullable": true + }, + "gpu_memory_utilization": { + "title": "Gpu Memory Utilization", + "description": "Fraction of GPU memory to use for the vLLM execution.", + "type": "number", + "nullable": true + }, + "swap_space": { + "title": "Swap Space", + "description": "Size of the CPU swap space per GPU (in GiB).", + "type": "number", + "nullable": true + }, + "cache_dtype": { + "title": "Cache Dtype", + "description": "Data type for kv cache storage.", + "type": "string", + "nullable": true + }, + "num_gpu_blocks_override": { + "title": "Num Gpu Blocks Override", + "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. 
Does nothing if None.", + "type": "integer", + "nullable": true + }, + "enable_prefix_caching": { + "title": "Enable Prefix Caching", + "description": "Enables automatic prefix caching.", + "type": "boolean", + "nullable": true + }, + "model": { + "type": "string", + "title": "Model", + "description": "ID of the model to use.", + "example": "mixtral-8x7b-instruct" + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "description": "Path to the checkpoint to load the model from.", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "description": "\nSuggested number of shards to distribute the model. When not specified, will infer the number of shards based on model config.\nSystem may decide to use a different number than the given value.\n", + "default": 1, + "type": "integer", + "minimum": 1.0, + "nullable": true + }, + "max_context_length": { + "title": "Max Context Length", + "description": "Maximum context length to use for the model. Defaults to the max allowed by the model. Deprecated in favor of max_model_len.", + "type": "integer", + "minimum": 1.0, + "nullable": true + }, + "response_role": { + "title": "Response Role", + "description": "Role of the response in the conversation. Only supported in chat completions.", + "type": "string", + "nullable": true + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels", + "description": "Labels to attach to the batch inference job.", + "default": {} + } + }, + "type": "object", + "required": [ + "model" + ], + "title": "CreateBatchCompletionsV1ModelConfig" + }, + "CreateBatchCompletionsV1Request": { + "properties": { + "input_data_path": { + "title": "Input Data Path", + "description": "Path to the input file. 
The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", + "type": "string", + "nullable": true + }, + "output_data_path": { + "type": "string", + "title": "Output Data Path", + "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels", + "description": "Labels to attach to the batch inference job.", + "default": {} + }, + "data_parallelism": { + "title": "Data Parallelism", + "description": "Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference.", + "default": 1, + "type": "integer", + "maximum": 64.0, + "minimum": 1.0, + "nullable": true + }, + "max_runtime_sec": { + "title": "Max Runtime Sec", + "description": "Maximum runtime of the batch inference in seconds. Default to one day.", + "default": 86400, + "type": "integer", + "maximum": 172800.0, + "minimum": 1.0, + "nullable": true + }, + "priority": { + "title": "Priority", + "description": "Priority of the batch inference job. 
Default to None.", + "type": "string", + "nullable": true + }, + "tool_config": { + "description": "\nConfiguration for tool use.\nNOTE: this config is highly experimental and signature will change significantly in future iterations.", + "$ref": "#/components/schemas/ToolConfig", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "description": "CPUs to use for the batch inference.", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "description": "Number of GPUs to use for the batch inference.", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "description": "Amount of memory to use for the batch inference.", + "nullable": true + }, + "gpu_type": { + "description": "GPU type to use for the batch inference.", + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "description": "Storage to use for the batch inference.", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "description": "Number of nodes per worker for the batch inference.", + "type": "integer", + "nullable": true + }, + "content": { + "$ref": "#/components/schemas/CreateBatchCompletionsV1RequestContent", + "nullable": true + }, + "model_config": { + "$ref": "#/components/schemas/CreateBatchCompletionsV1ModelConfig" + } + }, + "type": "object", + "required": [ + "output_data_path", + "model_config" + ], + "title": "CreateBatchCompletionsV1Request", + "description": "Request object for batch completions." 
+ }, + "CreateBatchCompletionsV1RequestContent": { + "properties": { + "prompts": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Prompts" + }, + "max_new_tokens": { + "type": "integer", + "title": "Max New Tokens" + }, + "temperature": { + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "title": "Temperature" + }, + "stop_sequences": { + "title": "Stop Sequences", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "return_token_log_probs": { + "title": "Return Token Log Probs", + "default": false, + "type": "boolean", + "nullable": true + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true + }, + "top_k": { + "title": "Top K", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "top_p": { + "title": "Top P", + "type": "number", + "maximum": 1.0, + "exclusiveMinimum": 0.0, + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "default": true, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "prompts", + "max_new_tokens", + "temperature" + ], + "title": "CreateBatchCompletionsV1RequestContent" + }, + "CreateBatchCompletionsV1Response": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + } + }, + "type": "object", + "required": [ + "job_id" + ], + "title": "CreateBatchCompletionsV1Response" + }, + "CreateBatchCompletionsV2Request": { + "properties": { + "input_data_path": { + "title": "Input Data Path", + "description": "Path to the input file. 
The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", + "type": "string", + "nullable": true + }, + "output_data_path": { + "type": "string", + "title": "Output Data Path", + "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels", + "description": "Labels to attach to the batch inference job.", + "default": {} + }, + "data_parallelism": { + "title": "Data Parallelism", + "description": "Number of replicas to run the batch inference. More replicas are slower to schedule but faster to inference.", + "default": 1, + "type": "integer", + "maximum": 64.0, + "minimum": 1.0, + "nullable": true + }, + "max_runtime_sec": { + "title": "Max Runtime Sec", + "description": "Maximum runtime of the batch inference in seconds. Default to one day.", + "default": 86400, + "type": "integer", + "maximum": 172800.0, + "minimum": 1.0, + "nullable": true + }, + "priority": { + "title": "Priority", + "description": "Priority of the batch inference job. 
Default to None.", + "type": "string", + "nullable": true + }, + "tool_config": { + "description": "\nConfiguration for tool use.\nNOTE: this config is highly experimental and signature will change significantly in future iterations.", + "$ref": "#/components/schemas/ToolConfig", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "description": "CPUs to use for the batch inference.", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "description": "Number of GPUs to use for the batch inference.", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "description": "Amount of memory to use for the batch inference.", + "nullable": true + }, + "gpu_type": { + "description": "GPU type to use for the batch inference.", + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "description": "Storage to use for the batch inference.", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "description": "Number of nodes per worker for the batch inference.", + "type": "integer", + "nullable": true + }, + "content": { + "anyOf": [ + { + "$ref": "#/components/schemas/CreateBatchCompletionsV1RequestContent" + }, + { + "items": { + "$ref": "#/components/schemas/FilteredCompletionV2Request" + }, + "type": "array" + }, + { + "items": { + "$ref": "#/components/schemas/FilteredChatCompletionV2Request" + }, + "type": "array" + } + ], + "title": "Content", + "description": "\nEither `input_data_path` or `content` needs to be provided.\nWhen input_data_path is provided, the input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].\n", + 
"nullable": true + }, + "model_config": { + "$ref": "#/components/schemas/BatchCompletionsModelConfig", + "description": "Model configuration for the batch inference. Hardware configurations are inferred." + } + }, + "type": "object", + "required": [ + "output_data_path", + "model_config" + ], + "title": "CreateBatchCompletionsV2Request", + "description": "Request object for batch completions." + }, + "CreateBatchJobResourceRequests": { + "properties": { + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "max_workers": { + "title": "Max Workers", + "type": "integer", + "nullable": true + }, + "per_worker": { + "title": "Per Worker", + "type": "integer", + "nullable": true + }, + "concurrent_requests_per_worker": { + "title": "Concurrent Requests Per Worker", + "type": "integer", + "nullable": true + } + }, + "type": "object", + "title": "CreateBatchJobResourceRequests" + }, + "CreateBatchJobV1Request": { + "properties": { + "model_bundle_id": { + "type": "string", + "title": "Model Bundle Id" + }, + "input_path": { + "type": "string", + "title": "Input Path" + }, + "serialization_format": { + "$ref": "#/components/schemas/BatchJobSerializationFormat" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "resource_requests": { + "$ref": "#/components/schemas/CreateBatchJobResourceRequests" + }, + "timeout_seconds": { + 
"type": "number", + "title": "Timeout Seconds", + "default": 43200.0 + } + }, + "type": "object", + "required": [ + "model_bundle_id", + "input_path", + "serialization_format", + "labels", + "resource_requests" + ], + "title": "CreateBatchJobV1Request" + }, + "CreateBatchJobV1Response": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + } + }, + "type": "object", + "required": [ + "job_id" + ], + "title": "CreateBatchJobV1Response" + }, + "CreateChatCompletionResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "A unique identifier for the chat completion." + }, + "choices": { + "items": { + "$ref": "#/components/schemas/Choice" + }, + "type": "array", + "title": "Choices", + "description": "A list of chat completion choices. Can be more than one if `n` is greater than 1." + }, + "created": { + "type": "integer", + "title": "Created", + "description": "The Unix timestamp (in seconds) of when the chat completion was created." + }, + "model": { + "type": "string", + "title": "Model", + "description": "The model used for the chat completion." 
+ }, + "service_tier": { + "$ref": "#/components/schemas/ServiceTier", + "nullable": true + }, + "system_fingerprint": { + "title": "System Fingerprint", + "description": "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n", + "type": "string", + "nullable": true + }, + "object": { + "type": "string", + "title": "Object", + "description": "The object type, which is always `chat.completion`.", + "enum": [ + "chat.completion" + ] + }, + "usage": { + "$ref": "#/components/schemas/CompletionUsage", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model", + "object" + ], + "title": "CreateChatCompletionResponse" + }, + "CreateChatCompletionStreamResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "A unique identifier for the chat completion. Each chunk has the same ID." + }, + "choices": { + "items": { + "$ref": "#/components/schemas/Choice1" + }, + "type": "array", + "title": "Choices", + "description": "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`.\n" + }, + "created": { + "type": "integer", + "title": "Created", + "description": "The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp." + }, + "model": { + "type": "string", + "title": "Model", + "description": "The model to generate the completion." 
+ }, + "service_tier": { + "$ref": "#/components/schemas/ServiceTier", + "nullable": true + }, + "system_fingerprint": { + "title": "System Fingerprint", + "description": "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n", + "type": "string", + "nullable": true + }, + "object": { + "type": "string", + "title": "Object", + "description": "The object type, which is always `chat.completion.chunk`.", + "enum": [ + "chat.completion.chunk" + ] + }, + "usage": { + "description": "An optional field that will only be present when you set\n`stream_options: {\"include_usage\": true}` in your request. When present, it\ncontains a null value **except for the last chunk** which contains the\ntoken usage statistics for the entire request.\n\n**NOTE:** If the stream is interrupted or cancelled, you may not\nreceive the final usage chunk which contains the total token usage for\nthe request.\n", + "$ref": "#/components/schemas/CompletionUsage", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model", + "object" + ], + "title": "CreateChatCompletionStreamResponse" + }, + "CreateCompletionResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "A unique identifier for the completion." + }, + "choices": { + "items": { + "$ref": "#/components/schemas/Choice2" + }, + "type": "array", + "title": "Choices", + "description": "The list of completion choices the model generated for the input prompt." + }, + "created": { + "type": "integer", + "title": "Created", + "description": "The Unix timestamp (in seconds) of when the completion was created." + }, + "model": { + "type": "string", + "title": "Model", + "description": "The model used for completion." 
+ }, + "system_fingerprint": { + "title": "System Fingerprint", + "description": "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.\n", + "type": "string", + "nullable": true + }, + "object": { + "type": "string", + "title": "Object", + "description": "The object type, which is always \"text_completion\"", + "enum": [ + "text_completion" + ] + }, + "usage": { + "$ref": "#/components/schemas/CompletionUsage", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "choices", + "created", + "model", + "object" + ], + "title": "CreateCompletionResponse" + }, + "CreateDeepSpeedModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + 
"title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "min_workers": { + "type": "integer", + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "title": "Per Worker" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "default": "hugging_face" + }, + "inference_framework_image_tag": { + "type": "string", + "title": "Inference Framework Image Tag", + "default": "latest" + }, + "num_shards": { + "type": "integer", + "title": "Num Shards", + "default": 1 + }, + "endpoint_type": { 
+ "$ref": "#/components/schemas/ModelEndpointType", + "default": "sync" + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "deepspeed", + "enum": [ + "deepspeed" + ] + } + }, + "type": "object", + "required": [ + "name", + "model_name", + "metadata", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateDeepSpeedModelEndpointRequest" + }, + "CreateDockerImageBatchJobBundleV1Request": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "image_repository": { + "type": "string", + "title": "Image Repository" + }, + "image_tag": { + "type": "string", + "title": "Image Tag" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Command" + }, + "env": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Env", + "default": {} + }, + "mount_location": { + "title": "Mount Location", + "type": "string", + "nullable": true + }, + "resource_requests": { + "$ref": "#/components/schemas/CreateDockerImageBatchJobResourceRequests", + "default": {} + }, + "public": { + "title": "Public", + "default": false, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "name", + "image_repository", + "image_tag", + "command" + ], + "title": "CreateDockerImageBatchJobBundleV1Request" + }, + "CreateDockerImageBatchJobBundleV1Response": { + "properties": { + "docker_image_batch_job_bundle_id": { + "type": "string", + "title": "Docker Image Batch Job Bundle Id" + } + }, + "type": "object", + "required": [ + "docker_image_batch_job_bundle_id" + ], + "title": "CreateDockerImageBatchJobBundleV1Response" + }, + "CreateDockerImageBatchJobResourceRequests": { + "properties": { + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": 
"integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + } + }, + "type": "object", + "title": "CreateDockerImageBatchJobResourceRequests" + }, + "CreateDockerImageBatchJobV1Request": { + "properties": { + "docker_image_batch_job_bundle_name": { + "title": "Docker Image Batch Job Bundle Name", + "type": "string", + "nullable": true + }, + "docker_image_batch_job_bundle_id": { + "title": "Docker Image Batch Job Bundle Id", + "type": "string", + "nullable": true + }, + "job_config": { + "title": "Job Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "resource_requests": { + "$ref": "#/components/schemas/CreateDockerImageBatchJobResourceRequests", + "default": {} + }, + "override_job_max_runtime_s": { + "title": "Override Job Max Runtime S", + "type": "integer", + "nullable": true + } + }, + "type": "object", + "required": [ + "labels" + ], + "title": "CreateDockerImageBatchJobV1Request" + }, + "CreateDockerImageBatchJobV1Response": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + } + }, + "type": "object", + "required": [ + "job_id" + ], + "title": "CreateDockerImageBatchJobV1Response" + }, + "CreateFineTuneRequest": { + "properties": { + "model": { + "type": "string", + "title": "Model" + }, + "training_file": { + "type": "string", + "title": "Training File" + }, + "validation_file": { + "title": "Validation File", + "type": "string", + "nullable": 
true + }, + "hyperparameters": { + "additionalProperties": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + }, + { + "additionalProperties": true, + "type": "object" + } + ] + }, + "type": "object", + "title": "Hyperparameters" + }, + "suffix": { + "title": "Suffix", + "type": "string", + "nullable": true + }, + "wandb_config": { + "title": "Wandb Config", + "additionalProperties": true, + "type": "object", + "nullable": true + } + }, + "type": "object", + "required": [ + "model", + "training_file", + "hyperparameters" + ], + "title": "CreateFineTuneRequest" + }, + "CreateFineTuneResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id" + } + }, + "type": "object", + "required": [ + "id" + ], + "title": "CreateFineTuneResponse" + }, + "CreateLLMModelEndpointV1Response": { + "properties": { + "endpoint_creation_task_id": { + "type": "string", + "title": "Endpoint Creation Task Id" + } + }, + "type": "object", + "required": [ + "endpoint_creation_task_id" + ], + "title": "CreateLLMModelEndpointV1Response" + }, + "CreateLightLLMModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": 
"string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "min_workers": { + "type": "integer", + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "title": "Per Worker" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "default": "hugging_face" + }, + "inference_framework_image_tag": { + "type": "string", + "title": "Inference Framework Image Tag", + "default": "latest" + }, + "num_shards": { + "type": "integer", + "title": "Num Shards", + "default": 1 + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType", + "default": "sync" + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "lightllm", + "enum": [ + "lightllm" + ] + } + }, + "type": "object", + "required": [ + "name", + "model_name", + "metadata", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateLightLLMModelEndpointRequest" + }, + "CreateModelBundleV1Request": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "location": { + "type": "string", + "title": "Location" + }, + "requirements": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Requirements" + }, + "env_params": { + "$ref": "#/components/schemas/ModelBundleEnvironmentParams" + }, + "packaging_type": { + "$ref": "#/components/schemas/ModelBundlePackagingType" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + 
"app_config": { + "title": "App Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "schema_location": { + "title": "Schema Location", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "name", + "location", + "requirements", + "env_params", + "packaging_type" + ], + "title": "CreateModelBundleV1Request", + "description": "Request object for creating a Model Bundle." + }, + "CreateModelBundleV1Response": { + "properties": { + "model_bundle_id": { + "type": "string", + "title": "Model Bundle Id" + } + }, + "type": "object", + "required": [ + "model_bundle_id" + ], + "title": "CreateModelBundleV1Response", + "description": "Response object for creating a Model Bundle." + }, + "CreateModelBundleV2Request": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "schema_location": { + "type": "string", + "title": "Schema Location" + }, + "flavor": { + "oneOf": [ + { + "$ref": "#/components/schemas/CloudpickleArtifactFlavor" + }, + { + "$ref": "#/components/schemas/ZipArtifactFlavor" + }, + { + "$ref": "#/components/schemas/RunnableImageFlavor" + }, + { + "$ref": "#/components/schemas/StreamingEnhancedRunnableImageFlavor" + }, + { + "$ref": "#/components/schemas/TritonEnhancedRunnableImageFlavor" + } + ], + "title": "Flavor", + "discriminator": { + "propertyName": "flavor", + "mapping": { + "cloudpickle_artifact": "#/components/schemas/CloudpickleArtifactFlavor", + "runnable_image": "#/components/schemas/RunnableImageFlavor", + "streaming_enhanced_runnable_image": "#/components/schemas/StreamingEnhancedRunnableImageFlavor", + "triton_enhanced_runnable_image": "#/components/schemas/TritonEnhancedRunnableImageFlavor", + "zip_artifact": "#/components/schemas/ZipArtifactFlavor" + } + } + } + }, + "type": "object", + "required": [ + "name", + "schema_location", + "flavor" 
+ ], + "title": "CreateModelBundleV2Request", + "description": "Request object for creating a Model Bundle." + }, + "CreateModelBundleV2Response": { + "properties": { + "model_bundle_id": { + "type": "string", + "title": "Model Bundle Id" + } + }, + "type": "object", + "required": [ + "model_bundle_id" + ], + "title": "CreateModelBundleV2Response", + "description": "Response object for creating a Model Bundle." + }, + "CreateModelEndpointV1Request": { + "properties": { + "name": { + "type": "string", + "maxLength": 63, + "title": "Name" + }, + "model_bundle_id": { + "type": "string", + "title": "Model Bundle Id" + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus" + }, + "gpus": { + "type": "integer", + "minimum": 0.0, + "title": "Gpus" + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory" + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage" + }, + "nodes_per_worker": { + "type": "integer", + "exclusiveMinimum": 0.0, + "title": "Nodes Per Worker", + "default": 1 + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "min_workers": { + "type": "integer", + "minimum": 0.0, + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "minimum": 0.0, + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "exclusiveMinimum": 0.0, + "title": "Per 
Worker" + }, + "concurrent_requests_per_worker": { + "title": "Concurrent Requests Per Worker", + "type": "integer", + "exclusiveMinimum": 0.0, + "nullable": true + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": false, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "name", + "model_bundle_id", + "endpoint_type", + "metadata", + "cpus", + "gpus", + "memory", + "storage", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateModelEndpointV1Request" + }, + "CreateModelEndpointV1Response": { + "properties": { + "endpoint_creation_task_id": { + "type": "string", + "title": "Endpoint Creation Task Id" + } + }, + "type": "object", + "required": [ + "endpoint_creation_task_id" + ], + "title": "CreateModelEndpointV1Response" + }, + "CreateSGLangModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + 
"title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "min_workers": { + "type": "integer", + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "title": "Per Worker" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "default": "hugging_face" + }, + "inference_framework_image_tag": { + "type": "string", + "title": "Inference Framework Image Tag", + "default": "latest" + }, + "num_shards": { + "type": "integer", + "title": "Num Shards", + "default": 1 + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType", + "default": "sync" + }, + "trust_remote_code": { + "title": "Trust Remote Code", + "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). 
Default to False.", + "default": false, + "type": "boolean", + "nullable": true + }, + "tp_size": { + "title": "Tp Size", + "description": "The tensor parallel size.", + "type": "integer", + "nullable": true + }, + "skip_tokenizer_init": { + "title": "Skip Tokenizer Init", + "description": "If set, skip init tokenizer and pass input_ids in generate request", + "type": "boolean", + "nullable": true + }, + "load_format": { + "title": "Load Format", + "description": "The format of the model weights to load.", + "type": "string", + "nullable": true + }, + "dtype": { + "title": "Dtype", + "description": "Data type for model weights and activations.", + "type": "string", + "nullable": true + }, + "kv_cache_dtype": { + "title": "Kv Cache Dtype", + "description": "Data type for kv cache storage. \"auto\" will use model data type.", + "type": "string", + "nullable": true + }, + "quantization_param_path": { + "title": "Quantization Param Path", + "description": "Path to the JSON file containing the KV cache scaling factors.", + "type": "string", + "nullable": true + }, + "quantization": { + "title": "Quantization", + "description": "The quantization method.", + "type": "string", + "nullable": true + }, + "context_length": { + "title": "Context Length", + "description": "The model's maximum context length.", + "type": "integer", + "nullable": true + }, + "device": { + "title": "Device", + "description": "The device type.", + "type": "string", + "nullable": true + }, + "served_model_name": { + "title": "Served Model Name", + "description": "Override the model name returned by the v1/models endpoint in OpenAI API server.", + "type": "string", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "The builtin chat template name or path of the chat template file.", + "type": "string", + "nullable": true + }, + "is_embedding": { + "title": "Is Embedding", + "description": "Whether to use a CausalLM as an embedding model.", + "type": "boolean", + 
"nullable": true + }, + "revision": { + "title": "Revision", + "description": "The specific model version to use.", + "type": "string", + "nullable": true + }, + "mem_fraction_static": { + "title": "Mem Fraction Static", + "description": "The fraction of the memory used for static allocation.", + "type": "number", + "nullable": true + }, + "max_running_requests": { + "title": "Max Running Requests", + "description": "The maximum number of running requests.", + "type": "integer", + "nullable": true + }, + "max_total_tokens": { + "title": "Max Total Tokens", + "description": "The maximum number of tokens in the memory pool.", + "type": "integer", + "nullable": true + }, + "chunked_prefill_size": { + "title": "Chunked Prefill Size", + "description": "The maximum number of tokens in a chunk for the chunked prefill.", + "type": "integer", + "nullable": true + }, + "max_prefill_tokens": { + "title": "Max Prefill Tokens", + "description": "The maximum number of tokens in a prefill batch.", + "type": "integer", + "nullable": true + }, + "schedule_policy": { + "title": "Schedule Policy", + "description": "The scheduling policy of the requests.", + "type": "string", + "nullable": true + }, + "schedule_conservativeness": { + "title": "Schedule Conservativeness", + "description": "How conservative the schedule policy is.", + "type": "number", + "nullable": true + }, + "cpu_offload_gb": { + "title": "Cpu Offload Gb", + "description": "How many GBs of RAM to reserve for CPU offloading", + "type": "integer", + "nullable": true + }, + "prefill_only_one_req": { + "title": "Prefill Only One Req", + "description": "If true, we only prefill one request at one prefill batch", + "type": "boolean", + "nullable": true + }, + "stream_interval": { + "title": "Stream Interval", + "description": "The interval for streaming in terms of the token length.", + "type": "integer", + "nullable": true + }, + "random_seed": { + "title": "Random Seed", + "description": "The random seed.", + "type": 
"integer", + "nullable": true + }, + "constrained_json_whitespace_pattern": { + "title": "Constrained Json Whitespace Pattern", + "description": "Regex pattern for syntactic whitespaces allowed in JSON constrained output.", + "type": "string", + "nullable": true + }, + "watchdog_timeout": { + "title": "Watchdog Timeout", + "description": "Set watchdog timeout in seconds.", + "type": "number", + "nullable": true + }, + "download_dir": { + "title": "Download Dir", + "description": "Model download directory.", + "type": "string", + "nullable": true + }, + "base_gpu_id": { + "title": "Base Gpu Id", + "description": "The base GPU ID to start allocating GPUs from.", + "type": "integer", + "nullable": true + }, + "log_level": { + "title": "Log Level", + "description": "The logging level of all loggers.", + "type": "string", + "nullable": true + }, + "log_level_http": { + "title": "Log Level Http", + "description": "The logging level of HTTP server.", + "type": "string", + "nullable": true + }, + "log_requests": { + "title": "Log Requests", + "description": "Log the inputs and outputs of all requests.", + "type": "boolean", + "nullable": true + }, + "show_time_cost": { + "title": "Show Time Cost", + "description": "Show time cost of custom marks.", + "type": "boolean", + "nullable": true + }, + "enable_metrics": { + "title": "Enable Metrics", + "description": "Enable log prometheus metrics.", + "type": "boolean", + "nullable": true + }, + "decode_log_interval": { + "title": "Decode Log Interval", + "description": "The log interval of decode batch.", + "type": "integer", + "nullable": true + }, + "api_key": { + "title": "Api Key", + "description": "Set API key of the server.", + "type": "string", + "nullable": true + }, + "file_storage_pth": { + "title": "File Storage Pth", + "description": "The path of the file storage in backend.", + "type": "string", + "nullable": true + }, + "enable_cache_report": { + "title": "Enable Cache Report", + "description": "Return number of 
cached tokens in usage.prompt_tokens_details.", + "type": "boolean", + "nullable": true + }, + "data_parallel_size": { + "title": "Data Parallel Size", + "description": "The data parallelism size.", + "type": "integer", + "nullable": true + }, + "load_balance_method": { + "title": "Load Balance Method", + "description": "The load balancing strategy for data parallelism.", + "type": "string", + "nullable": true + }, + "expert_parallel_size": { + "title": "Expert Parallel Size", + "description": "The expert parallelism size.", + "type": "integer", + "nullable": true + }, + "dist_init_addr": { + "title": "Dist Init Addr", + "description": "The host address for initializing distributed backend.", + "type": "string", + "nullable": true + }, + "nnodes": { + "title": "Nnodes", + "description": "The number of nodes.", + "type": "integer", + "nullable": true + }, + "node_rank": { + "title": "Node Rank", + "description": "The node rank.", + "type": "integer", + "nullable": true + }, + "json_model_override_args": { + "title": "Json Model Override Args", + "description": "A dictionary in JSON string format used to override default model configurations.", + "type": "string", + "nullable": true + }, + "lora_paths": { + "title": "Lora Paths", + "description": "The list of LoRA adapters.", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "max_loras_per_batch": { + "title": "Max Loras Per Batch", + "description": "Maximum number of adapters for a running batch.", + "type": "integer", + "nullable": true + }, + "attention_backend": { + "title": "Attention Backend", + "description": "Choose the kernels for attention layers.", + "type": "string", + "nullable": true + }, + "sampling_backend": { + "title": "Sampling Backend", + "description": "Choose the kernels for sampling layers.", + "type": "string", + "nullable": true + }, + "grammar_backend": { + "title": "Grammar Backend", + "description": "Choose the backend for grammar-guided decoding.", + 
"type": "string", + "nullable": true + }, + "speculative_algorithm": { + "title": "Speculative Algorithm", + "description": "Speculative algorithm.", + "type": "string", + "nullable": true + }, + "speculative_draft_model_path": { + "title": "Speculative Draft Model Path", + "description": "The path of the draft model weights.", + "type": "string", + "nullable": true + }, + "speculative_num_steps": { + "title": "Speculative Num Steps", + "description": "The number of steps sampled from draft model in Speculative Decoding.", + "type": "integer", + "nullable": true + }, + "speculative_num_draft_tokens": { + "title": "Speculative Num Draft Tokens", + "description": "The number of token sampled from draft model in Speculative Decoding.", + "type": "integer", + "nullable": true + }, + "speculative_eagle_topk": { + "title": "Speculative Eagle Topk", + "description": "The number of token sampled from draft model in eagle2 each step.", + "type": "integer", + "nullable": true + }, + "enable_double_sparsity": { + "title": "Enable Double Sparsity", + "description": "Enable double sparsity attention", + "type": "boolean", + "nullable": true + }, + "ds_channel_config_path": { + "title": "Ds Channel Config Path", + "description": "The path of the double sparsity channel config", + "type": "string", + "nullable": true + }, + "ds_heavy_channel_num": { + "title": "Ds Heavy Channel Num", + "description": "The number of heavy channels in double sparsity attention", + "type": "integer", + "nullable": true + }, + "ds_heavy_token_num": { + "title": "Ds Heavy Token Num", + "description": "The number of heavy tokens in double sparsity attention", + "type": "integer", + "nullable": true + }, + "ds_heavy_channel_type": { + "title": "Ds Heavy Channel Type", + "description": "The type of heavy channels in double sparsity attention", + "type": "string", + "nullable": true + }, + "ds_sparse_decode_threshold": { + "title": "Ds Sparse Decode Threshold", + "description": "The threshold for sparse 
decoding in double sparsity attention", + "type": "integer", + "nullable": true + }, + "disable_radix_cache": { + "title": "Disable Radix Cache", + "description": "Disable RadixAttention for prefix caching.", + "type": "boolean", + "nullable": true + }, + "disable_jump_forward": { + "title": "Disable Jump Forward", + "description": "Disable jump-forward for grammar-guided decoding.", + "type": "boolean", + "nullable": true + }, + "disable_cuda_graph": { + "title": "Disable Cuda Graph", + "description": "Disable cuda graph.", + "type": "boolean", + "nullable": true + }, + "disable_cuda_graph_padding": { + "title": "Disable Cuda Graph Padding", + "description": "Disable cuda graph when padding is needed.", + "type": "boolean", + "nullable": true + }, + "disable_outlines_disk_cache": { + "title": "Disable Outlines Disk Cache", + "description": "Disable disk cache of outlines.", + "type": "boolean", + "nullable": true + }, + "disable_custom_all_reduce": { + "title": "Disable Custom All Reduce", + "description": "Disable the custom all-reduce kernel.", + "type": "boolean", + "nullable": true + }, + "disable_mla": { + "title": "Disable Mla", + "description": "Disable Multi-head Latent Attention (MLA) for DeepSeek-V2.", + "type": "boolean", + "nullable": true + }, + "disable_overlap_schedule": { + "title": "Disable Overlap Schedule", + "description": "Disable the overlap scheduler.", + "type": "boolean", + "nullable": true + }, + "enable_mixed_chunk": { + "title": "Enable Mixed Chunk", + "description": "Enable mixing prefill and decode in a batch when using chunked prefill.", + "type": "boolean", + "nullable": true + }, + "enable_dp_attention": { + "title": "Enable Dp Attention", + "description": "Enable data parallelism for attention and tensor parallelism for FFN.", + "type": "boolean", + "nullable": true + }, + "enable_ep_moe": { + "title": "Enable Ep Moe", + "description": "Enable expert parallelism for moe.", + "type": "boolean", + "nullable": true + }, + 
"enable_torch_compile": { + "title": "Enable Torch Compile", + "description": "Optimize the model with torch.compile.", + "type": "boolean", + "nullable": true + }, + "torch_compile_max_bs": { + "title": "Torch Compile Max Bs", + "description": "Set the maximum batch size when using torch compile.", + "type": "integer", + "nullable": true + }, + "cuda_graph_max_bs": { + "title": "Cuda Graph Max Bs", + "description": "Set the maximum batch size for cuda graph.", + "type": "integer", + "nullable": true + }, + "cuda_graph_bs": { + "title": "Cuda Graph Bs", + "description": "Set the list of batch sizes for cuda graph.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "torchao_config": { + "title": "Torchao Config", + "description": "Optimize the model with torchao.", + "type": "string", + "nullable": true + }, + "enable_nan_detection": { + "title": "Enable Nan Detection", + "description": "Enable the NaN detection for debugging purposes.", + "type": "boolean", + "nullable": true + }, + "enable_p2p_check": { + "title": "Enable P2P Check", + "description": "Enable P2P check for GPU access.", + "type": "boolean", + "nullable": true + }, + "triton_attention_reduce_in_fp32": { + "title": "Triton Attention Reduce In Fp32", + "description": "Cast the intermediate attention results to fp32.", + "type": "boolean", + "nullable": true + }, + "triton_attention_num_kv_splits": { + "title": "Triton Attention Num Kv Splits", + "description": "The number of KV splits in flash decoding Triton kernel.", + "type": "integer", + "nullable": true + }, + "num_continuous_decode_steps": { + "title": "Num Continuous Decode Steps", + "description": "Run multiple continuous decoding steps to reduce scheduling overhead.", + "type": "integer", + "nullable": true + }, + "delete_ckpt_after_loading": { + "title": "Delete Ckpt After Loading", + "description": "Delete the model checkpoint after loading the model.", + "type": "boolean", + "nullable": true + }, + 
"enable_memory_saver": { + "title": "Enable Memory Saver", + "description": "Allow saving memory using release_memory_occupation and resume_memory_occupation", + "type": "boolean", + "nullable": true + }, + "allow_auto_truncate": { + "title": "Allow Auto Truncate", + "description": "Allow automatically truncating requests that exceed the maximum input length.", + "type": "boolean", + "nullable": true + }, + "enable_custom_logit_processor": { + "title": "Enable Custom Logit Processor", + "description": "Enable users to pass custom logit processors to the server.", + "type": "boolean", + "nullable": true + }, + "tool_call_parser": { + "title": "Tool Call Parser", + "description": "Specify the parser for handling tool-call interactions.", + "type": "string", + "nullable": true + }, + "huggingface_repo": { + "title": "Huggingface Repo", + "description": "The Hugging Face repository ID.", + "type": "string", + "nullable": true + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "sglang", + "enum": [ + "sglang" + ] + } + }, + "type": "object", + "required": [ + "name", + "model_name", + "metadata", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateSGLangModelEndpointRequest" + }, + "CreateTensorRTLLMModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": 
"number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "min_workers": { + "type": "integer", + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "title": "Per Worker" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "default": "hugging_face" + }, + "inference_framework_image_tag": { + "type": "string", + "title": "Inference Framework Image Tag", + "default": "latest" + }, + "num_shards": { + "type": "integer", + "title": "Num Shards", + "default": 1 + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType", + "default": "sync" + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "tensorrt_llm", + "enum": [ + "tensorrt_llm" + ] + } + }, + "type": "object", + "required": [ + "name", + "model_name", + "metadata", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateTensorRTLLMModelEndpointRequest" + }, + "CreateTextGenerationInferenceModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + 
}, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "min_workers": { + "type": "integer", + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "title": "Per Worker" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "default": "hugging_face" + }, + "inference_framework_image_tag": { + "type": "string", + "title": "Inference Framework Image Tag", + "default": "latest" + }, + "num_shards": { + "type": "integer", + "title": "Num Shards", + "default": 1 + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType", + "default": "sync" + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "text_generation_inference", + "enum": [ + "text_generation_inference" + ] + } + }, + "type": "object", + "required": [ + "name", + "model_name", + "metadata", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateTextGenerationInferenceModelEndpointRequest" + }, + "CreateTriggerV1Request": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "cron_schedule": { + "type": "string", + "title": "Cron Schedule" + }, + "bundle_id": { + "type": "string", + "title": "Bundle Id" + }, + "default_job_config": { + "title": "Default Job Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_job_metadata": { + "title": "Default Job Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object", + 
"nullable": true + } + }, + "type": "object", + "required": [ + "name", + "cron_schedule", + "bundle_id" + ], + "title": "CreateTriggerV1Request" + }, + "CreateTriggerV1Response": { + "properties": { + "trigger_id": { + "type": "string", + "title": "Trigger Id" + } + }, + "type": "object", + "required": [ + "trigger_id" + ], + "title": "CreateTriggerV1Response" + }, + "CreateVLLMModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + 
"default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "min_workers": { + "type": "integer", + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "title": "Per Worker" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Labels" + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "default": "hugging_face" + }, + "inference_framework_image_tag": { + "type": "string", + "title": "Inference Framework Image Tag", + "default": "latest" + }, + "num_shards": { + "type": "integer", + "title": "Num Shards", + "default": 1 + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType", + "default": "sync" + }, + "max_gpu_memory_utilization": { + "title": "Max Gpu Memory Utilization", + "description": "Maximum GPU memory utilization for the batch inference. Default to 90%. 
Deprecated in favor of specifying this in VLLMModelConfig", + "type": "number", + "nullable": true + }, + "attention_backend": { + "title": "Attention Backend", + "description": "Attention backend to use for vLLM. Default to None.", + "type": "string", + "nullable": true + }, + "max_model_len": { + "title": "Max Model Len", + "description": "Model context length. If unspecified, will be automatically derived from the model config", + "type": "integer", + "nullable": true + }, + "max_num_seqs": { + "title": "Max Num Seqs", + "description": "Maximum number of sequences per iteration", + "type": "integer", + "nullable": true + }, + "enforce_eager": { + "title": "Enforce Eager", + "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility", + "type": "boolean", + "nullable": true + }, + "trust_remote_code": { + "title": "Trust Remote Code", + "description": "Whether to trust remote code from Hugging Face Hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", + "default": false, + "type": "boolean", + "nullable": true + }, + "pipeline_parallel_size": { + "title": "Pipeline Parallel Size", + "description": "Number of pipeline stages. Default to None.", + "type": "integer", + "nullable": true + }, + "tensor_parallel_size": { + "title": "Tensor Parallel Size", + "description": "Number of tensor parallel replicas. Default to None.", + "type": "integer", + "nullable": true + }, + "quantization": { + "title": "Quantization", + "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. 
If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", + "type": "string", + "nullable": true + }, + "disable_log_requests": { + "title": "Disable Log Requests", + "description": "Disable logging requests. Default to None.", + "type": "boolean", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "tool_call_parser": { + "title": "Tool Call Parser", + "description": "Tool call parser", + "type": "string", + "nullable": true + }, + "enable_auto_tool_choice": { + "title": "Enable Auto Tool Choice", + "description": "Enable auto tool choice", + "type": "boolean", + "nullable": true + }, + "load_format": { + "title": "Load Format", + "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", + "type": "string", + "nullable": true + }, + "config_format": { + "title": "Config Format", + "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", + "type": "string", + "nullable": true + }, + "tokenizer_mode": { + "title": "Tokenizer Mode", + "description": "Tokenizer mode. 
'auto' will use the fast tokenizer if available, 'slow' will always use the slow tokenizer, and 'mistral' will always use the tokenizer from `mistral_common`.", + "type": "string", + "nullable": true + }, + "limit_mm_per_prompt": { + "title": "Limit Mm Per Prompt", + "description": "Maximum number of data instances per modality per prompt. Only applicable for multimodal models.", + "type": "string", + "nullable": true + }, + "max_num_batched_tokens": { + "title": "Max Num Batched Tokens", + "description": "Maximum number of batched tokens per iteration", + "type": "integer", + "nullable": true + }, + "tokenizer": { + "title": "Tokenizer", + "description": "Name or path of the huggingface tokenizer to use.", + "type": "string", + "nullable": true + }, + "dtype": { + "title": "Dtype", + "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", + "type": "string", + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "Random seed for reproducibility.", + "type": "integer", + "nullable": true + }, + "revision": { + "title": "Revision", + "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "code_revision": { + "title": "Code Revision", + "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "rope_scaling": { + "title": "Rope Scaling", + "description": "Dictionary containing the scaling configuration for the RoPE embeddings. 
When using this flag, don't update `max_position_embeddings` to the expected new maximum.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "tokenizer_revision": { + "title": "Tokenizer Revision", + "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "quantization_param_path": { + "title": "Quantization Param Path", + "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", + "type": "string", + "nullable": true + }, + "max_seq_len_to_capture": { + "title": "Max Seq Len To Capture", + "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", + "type": "integer", + "nullable": true + }, + "disable_sliding_window": { + "title": "Disable Sliding Window", + "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", + "type": "boolean", + "nullable": true + }, + "skip_tokenizer_init": { + "title": "Skip Tokenizer Init", + "description": "If true, skip initialization of tokenizer and detokenizer.", + "type": "boolean", + "nullable": true + }, + "served_model_name": { + "title": "Served Model Name", + "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", + "type": "string", + "nullable": true + }, + "override_neuron_config": { + "title": "Override Neuron Config", + "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "mm_processor_kwargs": { + "title": "Mm Processor Kwargs", + "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "block_size": { + "title": "Block Size", + "description": "Size of a cache block in number of tokens.", + "type": "integer", + "nullable": true + }, + "gpu_memory_utilization": { + "title": "Gpu Memory Utilization", + "description": "Fraction of GPU memory to use for the vLLM execution.", + "type": "number", + "nullable": true + }, + "swap_space": { + "title": "Swap Space", + "description": "Size of the CPU swap space per GPU (in GiB).", + "type": "number", + "nullable": true + }, + "cache_dtype": { + "title": "Cache Dtype", + "description": "Data type for kv cache storage.", + "type": "string", + "nullable": true + }, + "num_gpu_blocks_override": { + "title": "Num Gpu Blocks Override", + "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. 
Does nothing if None.", + "type": "integer", + "nullable": true + }, + "enable_prefix_caching": { + "title": "Enable Prefix Caching", + "description": "Enables automatic prefix caching.", + "type": "boolean", + "nullable": true + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "vllm", + "enum": [ + "vllm" + ] + } + }, + "type": "object", + "required": [ + "name", + "model_name", + "metadata", + "min_workers", + "max_workers", + "per_worker", + "labels" + ], + "title": "CreateVLLMModelEndpointRequest" + }, + "CustomFramework": { + "properties": { + "framework_type": { + "type": "string", + "title": "Framework Type", + "enum": [ + "custom_base_image" + ] + }, + "image_repository": { + "type": "string", + "title": "Image Repository" + }, + "image_tag": { + "type": "string", + "title": "Image Tag" + } + }, + "type": "object", + "required": [ + "framework_type", + "image_repository", + "image_tag" + ], + "title": "CustomFramework", + "description": "This is the entity-layer class for a custom framework specification." + }, + "DeleteFileResponse": { + "properties": { + "deleted": { + "type": "boolean", + "title": "Deleted", + "description": "Whether deletion was successful." + } + }, + "type": "object", + "required": [ + "deleted" + ], + "title": "DeleteFileResponse", + "description": "Response object for deleting a file." 
+ }, + "DeleteLLMEndpointResponse": { + "properties": { + "deleted": { + "type": "boolean", + "title": "Deleted" + } + }, + "type": "object", + "required": [ + "deleted" + ], + "title": "DeleteLLMEndpointResponse" + }, + "DeleteModelEndpointV1Response": { + "properties": { + "deleted": { + "type": "boolean", + "title": "Deleted" + } + }, + "type": "object", + "required": [ + "deleted" + ], + "title": "DeleteModelEndpointV1Response" + }, + "DeleteTriggerV1Response": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "DeleteTriggerV1Response" + }, + "DockerImageBatchJob": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "created_by": { + "type": "string", + "title": "Created By" + }, + "owner": { + "type": "string", + "title": "Owner" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "format": "date-time", + "nullable": true + }, + "status": { + "$ref": "#/components/schemas/BatchJobStatus" + }, + "annotations": { + "title": "Annotations", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "override_job_max_runtime_s": { + "title": "Override Job Max Runtime S", + "type": "integer", + "nullable": true + }, + "num_workers": { + "title": "Num Workers", + "default": 1, + "type": "integer", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "created_by", + "owner", + "created_at", + "status" + ], + "title": "DockerImageBatchJob", + "description": "This is the entity-layer class for a Docker Image Batch Job, i.e. a batch job\ncreated via the \"supply a docker image for a k8s job\" API." 
+ }, + "DockerImageBatchJobBundleV1Response": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "image_repository": { + "type": "string", + "title": "Image Repository" + }, + "image_tag": { + "type": "string", + "title": "Image Tag" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Command" + }, + "env": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Env" + }, + "mount_location": { + "title": "Mount Location", + "type": "string", + "nullable": true + }, + "cpus": { + "title": "Cpus", + "type": "string", + "nullable": true + }, + "memory": { + "title": "Memory", + "type": "string", + "nullable": true + }, + "storage": { + "title": "Storage", + "type": "string", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "gpu_type": { + "title": "Gpu Type", + "type": "string", + "nullable": true + }, + "public": { + "title": "Public", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "name", + "created_at", + "image_repository", + "image_tag", + "command", + "env" + ], + "title": "DockerImageBatchJobBundleV1Response" + }, + "EndpointPredictV1Request": { + "properties": { + "url": { + "title": "Url", + "type": "string", + "nullable": true + }, + "args": { + "$ref": "#/components/schemas/RequestSchema", + "nullable": true + }, + "cloudpickle": { + "title": "Cloudpickle", + "type": "string", + "nullable": true + }, + "callback_url": { + "title": "Callback Url", + "type": "string", + "nullable": true + }, + "callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "return_pickled": { + "type": "boolean", + "title": "Return Pickled", + "default": false + }, + "destination_path": { + "title": "Destination Path", 
+ "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "EndpointPredictV1Request" + }, + "File": { + "properties": { + "filename": { + "title": "Filename", + "description": "The name of the file, used when passing the file to the model as a \nstring.\n", + "type": "string", + "nullable": true + }, + "file_data": { + "title": "File Data", + "description": "The base64 encoded file data, used when passing the file to the model \nas a string.\n", + "type": "string", + "nullable": true + }, + "file_id": { + "title": "File Id", + "description": "The ID of an uploaded file to use as input.\n", + "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "File" + }, + "FilteredChatCompletionV2Request": { + "properties": { + "best_of": { + "title": "Best Of", + "description": "Number of output sequences that are generated from the prompt.\n From these `best_of` sequences, the top `n` sequences are returned.\n `best_of` must be greater than or equal to `n`. This is treated as\n the beam width when `use_beam_search` is True. By default, `best_of`\n is set to `n`.", + "type": "integer", + "nullable": true + }, + "top_k": { + "title": "Top K", + "description": "Controls the number of top tokens to consider. -1 means consider all tokens.", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "min_p": { + "title": "Min P", + "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. 
Set to 0 to disable this.", + "type": "number", + "nullable": true + }, + "use_beam_search": { + "title": "Use Beam Search", + "description": "Whether to use beam search for sampling.", + "type": "boolean", + "nullable": true + }, + "length_penalty": { + "title": "Length Penalty", + "description": "Float that penalizes sequences based on their length.\n Used in beam search.", + "type": "number", + "nullable": true + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", + "type": "number", + "nullable": true + }, + "early_stopping": { + "title": "Early Stopping", + "description": "Controls the stopping condition for beam search. It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", + "type": "boolean", + "nullable": true + }, + "stop_token_ids": { + "title": "Stop Token Ids", + "description": "List of tokens that stop the generation when they are\n generated. The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "include_stop_str_in_output": { + "title": "Include Stop Str In Output", + "description": "Whether to include the stop strings in\n output text. 
Defaults to False.", + "type": "boolean", + "nullable": true + }, + "ignore_eos": { + "title": "Ignore Eos", + "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", + "type": "boolean", + "nullable": true + }, + "min_tokens": { + "title": "Min Tokens", + "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", + "type": "integer", + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "description": "Whether to skip special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "spaces_between_special_tokens": { + "title": "Spaces Between Special Tokens", + "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "echo": { + "title": "Echo", + "description": "If true, the new message will be prepended with the last message if they belong to the same role.", + "type": "boolean", + "nullable": true + }, + "add_generation_prompt": { + "title": "Add Generation Prompt", + "description": "If true, the generation prompt will be added to the chat template. This is a parameter used by chat template in tokenizer config of the model.", + "type": "boolean", + "nullable": true + }, + "continue_final_message": { + "title": "Continue Final Message", + "description": "If this is set, the chat will be formatted so that the final message in the chat is open-ended, without any EOS tokens. The model will continue this message rather than starting a new one. This allows you to \"prefill\" part of the model's response for it. Cannot be used at the same time as `add_generation_prompt`.", + "type": "boolean", + "nullable": true + }, + "add_special_tokens": { + "title": "Add Special Tokens", + "description": "If true, special tokens (e.g. 
BOS) will be added to the prompt on top of what is added by the chat template. For most models, the chat template takes care of adding the special tokens so this should be set to false (as is the default).", + "type": "boolean", + "nullable": true + }, + "documents": { + "title": "Documents", + "description": "A list of dicts representing documents that will be accessible to the model if it is performing RAG (retrieval-augmented generation). If the template does not support RAG, this argument will have no effect. We recommend that each document should be a dict containing \"title\" and \"text\" keys.", + "items": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "type": "array", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "A Jinja template to use for this conversion. As of transformers v4.44, default chat template is no longer allowed, so you must provide a chat template if the model's tokenizer does not define one and no override template is given", + "type": "string", + "nullable": true + }, + "chat_template_kwargs": { + "title": "Chat Template Kwargs", + "description": "Additional kwargs to pass to the template renderer. Will be accessible by the chat template.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_json": { + "title": "Guided Json", + "description": "JSON schema for guided decoding. Only supported in vllm.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_regex": { + "title": "Guided Regex", + "description": "Regex for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_choice": { + "title": "Guided Choice", + "description": "Choices for guided decoding. 
Only supported in vllm.", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "guided_grammar": { + "title": "Guided Grammar", + "description": "Context-free grammar for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_decoding_backend": { + "title": "Guided Decoding Backend", + "description": "If specified, will override the default guided decoding backend of the server for this specific request. If set, must be either 'outlines' / 'lm-format-enforcer'", + "type": "string", + "nullable": true + }, + "guided_whitespace_pattern": { + "title": "Guided Whitespace Pattern", + "description": "If specified, will override the default whitespace pattern for guided json decoding.", + "type": "string", + "nullable": true + }, + "priority": { + "title": "Priority", + "description": "The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.", + "type": "integer", + "nullable": true + }, + "metadata": { + "$ref": "#/components/schemas/Metadata", + "nullable": true + }, + "temperature": { + "title": "Temperature", + "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.\n", + "default": 1, + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "top_p": { + "title": "Top P", + "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. 
So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", + "default": 1, + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "user": { + "title": "User", + "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", + "type": "string", + "nullable": true, + "example": "user-1234" + }, + "service_tier": { + "$ref": "#/components/schemas/ServiceTier", + "nullable": true + }, + "messages": { + "items": { + "$ref": "#/components/schemas/ChatCompletionRequestMessage" + }, + "type": "array", + "minItems": 1, + "title": "Messages", + "description": "A list of messages comprising the conversation so far. Depending on the\n[model](/docs/models) you use, different message types (modalities) are\nsupported, like [text](/docs/guides/text-generation),\n[images](/docs/guides/vision), and [audio](/docs/guides/audio).\n" + }, + "model": { + "title": "Model", + "type": "string", + "nullable": true + }, + "modalities": { + "$ref": "#/components/schemas/ResponseModalities", + "nullable": true + }, + "reasoning_effort": { + "$ref": "#/components/schemas/ReasoningEffort", + "nullable": true + }, + "max_completion_tokens": { + "title": "Max Completion Tokens", + "description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and [reasoning tokens](/docs/guides/reasoning).\n", + "type": "integer", + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "presence_penalty": { + "title": "Presence Penalty", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "web_search_options": { + "title": "Web search", + "description": "This tool searches the web for relevant results to use in a response.\nLearn more about the [web search tool](/docs/guides/tools-web-search?api-mode=chat).\n", + "$ref": "#/components/schemas/WebSearchOptions", + "nullable": true + }, + "top_logprobs": { + "title": "Top Logprobs", + "description": "An integer between 0 and 20 specifying the number of most likely tokens to\nreturn at each token position, each with an associated log probability.\n`logprobs` must be set to `true` if this parameter is used.\n", + "type": "integer", + "maximum": 20.0, + "minimum": 0.0, + "nullable": true + }, + "response_format": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResponseFormatText" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonSchema" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonObject" + } + ], + "title": "Response Format", + "description": "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. Learn more in the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. 
Using `json_schema`\nis preferred for models that support it.\n", + "nullable": true + }, + "audio": { + "description": "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`. [Learn more](/docs/guides/audio).\n", + "$ref": "#/components/schemas/Audio2", + "nullable": true + }, + "store": { + "title": "Store", + "description": "Whether or not to store the output of this chat completion request for \nuse in our [model distillation](/docs/guides/distillation) or\n[evals](/docs/guides/evals) products.\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "stream": { + "title": "Stream", + "default": false, + "type": "boolean", + "nullable": true + }, + "stop": { + "$ref": "#/components/schemas/StopConfiguration", + "nullable": true + }, + "logit_bias": { + "title": "Logit Bias", + "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.\n", + "additionalProperties": { + "type": "integer" + }, + "type": "object", + "nullable": true + }, + "logprobs": { + "title": "Logprobs", + "description": "Whether to return log probabilities of the output tokens or not. If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of [tokens](/tokenizer) that can be generated in the\nchat completion. 
This value can be used to control\n[costs](https://openai.com/api/pricing/) for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with [o-series models](/docs/guides/reasoning).\n", + "type": "integer", + "nullable": true + }, + "n": { + "title": "N", + "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.", + "default": 1, + "type": "integer", + "maximum": 128.0, + "minimum": 1.0, + "nullable": true, + "example": 1 + }, + "prediction": { + "description": "Configuration for a [Predicted Output](/docs/guides/predicted-outputs),\nwhich can greatly improve response times when large parts of the model\nresponse are known ahead of time. This is most common when you are\nregenerating a file with only minor changes to most of the content.\n", + "$ref": "#/components/schemas/PredictionContent", + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", + "type": "integer", + "maximum": 9.223372036854776e+18, + "minimum": -9.223372036854776e+18, + "nullable": true + }, + "stream_options": { + "$ref": "#/components/schemas/ChatCompletionStreamOptions", + "nullable": true + }, + "tools": { + "title": "Tools", + "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. 
A max of 128 functions are supported.\n", + "items": { + "$ref": "#/components/schemas/ChatCompletionTool" + }, + "type": "array", + "nullable": true + }, + "tool_choice": { + "$ref": "#/components/schemas/ChatCompletionToolChoiceOption", + "nullable": true + }, + "parallel_tool_calls": { + "$ref": "#/components/schemas/ParallelToolCalls", + "nullable": true + }, + "function_call": { + "anyOf": [ + { + "type": "string", + "enum": [ + "none", + "auto" + ] + }, + { + "$ref": "#/components/schemas/ChatCompletionFunctionCallOption" + } + ], + "title": "Function Call", + "description": "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. `auto` is the default\nif functions are present.\n", + "nullable": true + }, + "functions": { + "title": "Functions", + "description": "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.\n", + "items": { + "$ref": "#/components/schemas/ChatCompletionFunctions" + }, + "type": "array", + "maxItems": 128, + "minItems": 1, + "nullable": true + } + }, + "type": "object", + "required": [ + "messages" + ], + "title": "FilteredChatCompletionV2Request" + }, + "FilteredCompletionV2Request": { + "properties": { + "best_of": { + "title": "Best Of", + "description": "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). 
Results cannot be streamed.\n\nWhen used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return \u2013 `best_of` must be greater than `n`.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", + "default": 1, + "type": "integer", + "maximum": 20.0, + "minimum": 0.0, + "nullable": true + }, + "top_k": { + "title": "Top K", + "description": "Controls the number of top tokens to consider. -1 means consider all tokens.", + "type": "integer", + "minimum": -1.0, + "nullable": true + }, + "min_p": { + "title": "Min P", + "description": "Float that represents the minimum probability for a token to be\n considered, relative to the probability of the most likely token.\n Must be in [0, 1]. Set to 0 to disable this.", + "type": "number", + "nullable": true + }, + "use_beam_search": { + "title": "Use Beam Search", + "description": "Whether to use beam search for sampling.", + "type": "boolean", + "nullable": true + }, + "length_penalty": { + "title": "Length Penalty", + "description": "Float that penalizes sequences based on their length.\n Used in beam search.", + "type": "number", + "nullable": true + }, + "repetition_penalty": { + "title": "Repetition Penalty", + "description": "Float that penalizes new tokens based on whether\n they appear in the prompt and the generated text so far. Values > 1\n encourage the model to use new tokens, while values < 1 encourage\n the model to repeat tokens.", + "type": "number", + "nullable": true + }, + "early_stopping": { + "title": "Early Stopping", + "description": "Controls the stopping condition for beam search. 
It\n accepts the following values: `True`, where the generation stops as\n soon as there are `best_of` complete candidates; `False`, where an\n heuristic is applied and the generation stops when is it very\n unlikely to find better candidates; `\"never\"`, where the beam search\n procedure only stops when there cannot be better candidates\n (canonical beam search algorithm).", + "type": "boolean", + "nullable": true + }, + "stop_token_ids": { + "title": "Stop Token Ids", + "description": "List of tokens that stop the generation when they are\n generated. The returned output will contain the stop tokens unless\n the stop tokens are special tokens.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "include_stop_str_in_output": { + "title": "Include Stop Str In Output", + "description": "Whether to include the stop strings in output text.", + "type": "boolean", + "nullable": true + }, + "ignore_eos": { + "title": "Ignore Eos", + "description": "Whether to ignore the EOS token and continue generating\n tokens after the EOS token is generated.", + "type": "boolean", + "nullable": true + }, + "min_tokens": { + "title": "Min Tokens", + "description": "Minimum number of tokens to generate per output sequence\n before EOS or stop_token_ids can be generated", + "type": "integer", + "nullable": true + }, + "skip_special_tokens": { + "title": "Skip Special Tokens", + "description": "Whether to skip special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "spaces_between_special_tokens": { + "title": "Spaces Between Special Tokens", + "description": "Whether to add spaces between special tokens in the output. Only supported in vllm.", + "default": true, + "type": "boolean", + "nullable": true + }, + "add_special_tokens": { + "title": "Add Special Tokens", + "description": "If true (the default), special tokens (e.g. 
BOS) will be added to the prompt.", + "type": "boolean", + "nullable": true + }, + "response_format": { + "anyOf": [ + { + "$ref": "#/components/schemas/ResponseFormatText" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonSchema" + }, + { + "$ref": "#/components/schemas/ResponseFormatJsonObject" + } + ], + "title": "Response Format", + "description": "Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'} or {'type': 'text' } is supported.", + "nullable": true + }, + "guided_json": { + "title": "Guided Json", + "description": "JSON schema for guided decoding. Only supported in vllm.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "guided_regex": { + "title": "Guided Regex", + "description": "Regex for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_choice": { + "title": "Guided Choice", + "description": "Choices for guided decoding. Only supported in vllm.", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "guided_grammar": { + "title": "Guided Grammar", + "description": "Context-free grammar for guided decoding. Only supported in vllm.", + "type": "string", + "nullable": true + }, + "guided_decoding_backend": { + "title": "Guided Decoding Backend", + "description": "If specified, will override the default guided decoding backend of the server for this specific request. 
If set, must be either 'outlines' / 'lm-format-enforcer'", + "type": "string", + "nullable": true + }, + "guided_whitespace_pattern": { + "title": "Guided Whitespace Pattern", + "description": "If specified, will override the default whitespace pattern for guided json decoding.", + "type": "string", + "nullable": true + }, + "model": { + "title": "Model", + "type": "string", + "nullable": true + }, + "prompt": { + "anyOf": [ + { + "type": "string" + }, + { + "items": { + "type": "string" + }, + "type": "array" + }, + { + "$ref": "#/components/schemas/Prompt" + }, + { + "$ref": "#/components/schemas/Prompt1" + } + ], + "title": "Prompt", + "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n", + "nullable": true + }, + "echo": { + "title": "Echo", + "description": "Echo back the prompt in addition to the completion\n", + "default": false, + "type": "boolean", + "nullable": true + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "logit_bias": { + "title": "Logit Bias", + "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. 
Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n\nAs an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.\n", + "additionalProperties": { + "type": "integer" + }, + "type": "object", + "nullable": true + }, + "logprobs": { + "title": "Logprobs", + "description": "Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n\nThe maximum value for `logprobs` is 5.\n", + "type": "integer", + "maximum": 5.0, + "minimum": 0.0, + "nullable": true + }, + "max_tokens": { + "title": "Max Tokens", + "description": "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n\nThe token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.\n", + "default": 16, + "type": "integer", + "minimum": 0.0, + "nullable": true, + "example": 16 + }, + "n": { + "title": "N", + "description": "How many completions to generate for each prompt.\n\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.\n", + "default": 1, + "type": "integer", + "maximum": 128.0, + "minimum": 1.0, + "nullable": true, + "example": 1 + }, + "presence_penalty": { + "title": "Presence Penalty", + "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n\n[See more information about frequency and presence penalties.](/docs/guides/text-generation)\n", + "default": 0, + "type": "number", + "maximum": 2.0, + "minimum": -2.0, + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.\n", + "type": "integer", + "nullable": true + }, + "stop": { + "$ref": "#/components/schemas/StopConfiguration", + "nullable": true + }, + "stream": { + "title": "Stream", + "default": false, + "type": "boolean", + "nullable": true + }, + "stream_options": { + "$ref": "#/components/schemas/ChatCompletionStreamOptions", + "nullable": true + }, + "suffix": { + "title": "Suffix", + "description": "The suffix that comes after a completion of inserted text.\n\nThis parameter is only supported for `gpt-3.5-turbo-instruct`.\n", + "type": "string", + "nullable": true, + "example": "test." + }, + "temperature": { + "title": "Temperature", + "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.\n", + "default": 1, + "type": "number", + "maximum": 2.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "top_p": { + "title": "Top P", + "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n\nWe generally recommend altering this or `temperature` but not both.\n", + "default": 1, + "type": "number", + "maximum": 1.0, + "minimum": 0.0, + "nullable": true, + "example": 1 + }, + "user": { + "title": "User", + "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids).\n", + "type": "string", + "nullable": true, + "example": "user-1234" + } + }, + "type": "object", + "required": [ + "prompt" + ], + "title": "FilteredCompletionV2Request" + }, + "Function1": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to call." + }, + "arguments": { + "type": "string", + "title": "Arguments", + "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." + } + }, + "type": "object", + "required": [ + "name", + "arguments" + ], + "title": "Function1" + }, + "Function2": { + "properties": { + "name": { + "title": "Name", + "description": "The name of the function to call.", + "type": "string", + "nullable": true + }, + "arguments": { + "title": "Arguments", + "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "Function2" + }, + "Function3": { + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to call." 
+ } + }, + "type": "object", + "required": [ + "name" + ], + "title": "Function3" + }, + "FunctionCall": { + "properties": { + "arguments": { + "type": "string", + "title": "Arguments", + "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to call." + } + }, + "type": "object", + "required": [ + "arguments", + "name" + ], + "title": "FunctionCall" + }, + "FunctionCall2": { + "properties": { + "arguments": { + "title": "Arguments", + "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.", + "type": "string", + "nullable": true + }, + "name": { + "title": "Name", + "description": "The name of the function to call.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "FunctionCall2" + }, + "FunctionObject": { + "properties": { + "description": { + "title": "Description", + "description": "A description of what the function does, used by the model to choose when and how to call the function.", + "type": "string", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." + }, + "parameters": { + "$ref": "#/components/schemas/FunctionParameters", + "nullable": true + }, + "strict": { + "title": "Strict", + "description": "Whether to enable strict schema adherence when generating the function call. 
If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn more about Structured Outputs in the [function calling guide](docs/guides/function-calling).", + "default": false, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "FunctionObject" + }, + "FunctionParameters": { + "properties": {}, + "additionalProperties": true, + "type": "object", + "title": "FunctionParameters" + }, + "GetAsyncTaskV1Response": { + "properties": { + "task_id": { + "type": "string", + "title": "Task Id" + }, + "status": { + "$ref": "#/components/schemas/TaskStatus" + }, + "result": { + "$ref": "#/components/schemas/ResponseSchema", + "nullable": true + }, + "traceback": { + "title": "Traceback", + "type": "string", + "nullable": true + }, + "status_code": { + "title": "Status Code", + "type": "integer", + "nullable": true + } + }, + "type": "object", + "required": [ + "task_id", + "status" + ], + "title": "GetAsyncTaskV1Response" + }, + "GetBatchCompletionV2Response": { + "properties": { + "job": { + "$ref": "#/components/schemas/BatchCompletionsJob" + } + }, + "type": "object", + "required": [ + "job" + ], + "title": "GetBatchCompletionV2Response" + }, + "GetBatchJobV1Response": { + "properties": { + "status": { + "$ref": "#/components/schemas/BatchJobStatus" + }, + "result": { + "title": "Result", + "type": "string", + "nullable": true + }, + "duration": { + "type": "string", + "format": "duration", + "title": "Duration" + }, + "num_tasks_pending": { + "title": "Num Tasks Pending", + "type": "integer", + "nullable": true + }, + "num_tasks_completed": { + "title": "Num Tasks Completed", + "type": "integer", + "nullable": true + } + }, + "type": "object", + "required": [ + "status", + "duration" + ], + "title": "GetBatchJobV1Response" + }, + "GetDockerImageBatchJobV1Response": { + "properties": { + "status": { + "$ref": 
"#/components/schemas/BatchJobStatus" + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "GetDockerImageBatchJobV1Response" + }, + "GetFileContentResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "ID of the requested file." + }, + "content": { + "type": "string", + "title": "Content", + "description": "File content." + } + }, + "type": "object", + "required": [ + "id", + "content" + ], + "title": "GetFileContentResponse", + "description": "Response object for retrieving a file's content." + }, + "GetFileResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "ID of the requested file." + }, + "filename": { + "type": "string", + "title": "Filename", + "description": "File name." + }, + "size": { + "type": "integer", + "title": "Size", + "description": "Length of the file, in characters." + } + }, + "type": "object", + "required": [ + "id", + "filename", + "size" + ], + "title": "GetFileResponse", + "description": "Response object for retrieving a file." + }, + "GetFineTuneEventsResponse": { + "properties": { + "events": { + "items": { + "$ref": "#/components/schemas/LLMFineTuneEvent" + }, + "type": "array", + "title": "Events" + } + }, + "type": "object", + "required": [ + "events" + ], + "title": "GetFineTuneEventsResponse" + }, + "GetFineTuneResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "Unique ID of the fine tune" + }, + "fine_tuned_model": { + "title": "Fine Tuned Model", + "description": "Name of the resulting fine-tuned model. This can be plugged into the Completion API ones the fine-tune is complete", + "type": "string", + "nullable": true + }, + "status": { + "$ref": "#/components/schemas/BatchJobStatus", + "description": "Status of the requested fine tune." 
+ } + }, + "type": "object", + "required": [ + "id", + "status" + ], + "title": "GetFineTuneResponse" + }, + "GetLLMModelEndpointV1Response": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "model_name": { + "type": "string", + "title": "Model Name" + }, + "source": { + "$ref": "#/components/schemas/LLMSource" + }, + "status": { + "$ref": "#/components/schemas/ModelEndpointStatus" + }, + "inference_framework": { + "$ref": "#/components/schemas/LLMInferenceFramework" + }, + "inference_framework_image_tag": { + "title": "Inference Framework Image Tag", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "type": "integer", + "nullable": true + }, + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "spec": { + "$ref": "#/components/schemas/GetModelEndpointV1Response", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "name", + "model_name", + "source", + "status", + "inference_framework" + ], + "title": "GetLLMModelEndpointV1Response" + }, + "GetModelEndpointV1Response": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "endpoint_type": { + "$ref": "#/components/schemas/ModelEndpointType" + }, + "destination": { + "type": "string", + "title": "Destination" + }, + "deployment_name": { + "title": "Deployment Name", + "type": "string", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "bundle_name": { + "type": "string", + "title": "Bundle Name" + }, + "status": { + "$ref": "#/components/schemas/ModelEndpointStatus" + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "labels": { + "title": "Labels", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "aws_role": { + "title": "Aws Role", + "type": "string", + "nullable": true + }, + "results_s3_bucket": { + "title": "Results S3 Bucket", + "type": "string", + "nullable": true + }, + "created_by": { + "type": "string", + "title": "Created By" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "last_updated_at": { + "type": "string", + "format": "date-time", + "title": "Last Updated At" + }, + "deployment_state": { + "$ref": 
"#/components/schemas/ModelEndpointDeploymentState", + "nullable": true + }, + "resource_state": { + "$ref": "#/components/schemas/ModelEndpointResourceState", + "nullable": true + }, + "num_queued_items": { + "title": "Num Queued Items", + "type": "integer", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "name", + "endpoint_type", + "destination", + "bundle_name", + "status", + "created_by", + "created_at", + "last_updated_at" + ], + "title": "GetModelEndpointV1Response" + }, + "GetTriggerV1Response": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "owner": { + "type": "string", + "title": "Owner" + }, + "created_by": { + "type": "string", + "title": "Created By" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "cron_schedule": { + "type": "string", + "title": "Cron Schedule" + }, + "docker_image_batch_job_bundle_id": { + "type": "string", + "title": "Docker Image Batch Job Bundle Id" + }, + "default_job_config": { + "title": "Default Job Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_job_metadata": { + "title": "Default Job Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "name", + "owner", + "created_by", + "created_at", + "cron_schedule", + "docker_image_batch_job_bundle_id" + ], + "title": "GetTriggerV1Response" + }, + "GpuType": { + "type": "string", + "enum": [ + "nvidia-tesla-t4", + "nvidia-ampere-a10", + "nvidia-ampere-a100", + "nvidia-ampere-a100e", + "nvidia-hopper-h100", + "nvidia-hopper-h100-1g20gb", + "nvidia-hopper-h100-3g40gb" + ], + "title": "GpuType", + "description": "Lists allowed GPU types for Launch." 
+ }, + "HTTPValidationError": { + "properties": { + "detail": { + "items": { + "$ref": "#/components/schemas/ValidationError" + }, + "type": "array", + "title": "Detail" + } + }, + "type": "object", + "title": "HTTPValidationError" + }, + "ImageUrl": { + "properties": { + "url": { + "type": "string", + "maxLength": 65536, + "minLength": 1, + "format": "uri", + "title": "Url", + "description": "Either a URL of the image or the base64 encoded image data.", + "example": "http://www.example.com/" + }, + "detail": { + "type": "string", + "enum": [ + "auto", + "low", + "high" + ], + "title": "Detail", + "description": "Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).", + "default": "auto" + } + }, + "type": "object", + "required": [ + "url" + ], + "title": "ImageUrl" + }, + "InputAudio": { + "properties": { + "data": { + "type": "string", + "title": "Data", + "description": "Base64 encoded audio data." + }, + "format": { + "type": "string", + "enum": [ + "wav", + "mp3" + ], + "title": "Format", + "description": "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\".\n" + } + }, + "type": "object", + "required": [ + "data", + "format" + ], + "title": "InputAudio" + }, + "JsonSchema": { + "properties": { + "description": { + "title": "Description", + "description": "A description of what the response format is for, used by the model to\ndetermine how to respond in the format.\n", + "type": "string", + "nullable": true + }, + "name": { + "type": "string", + "title": "Name", + "description": "The name of the response format. 
Must be a-z, A-Z, 0-9, or contain\nunderscores and dashes, with a maximum length of 64.\n" + }, + "schema": { + "$ref": "#/components/schemas/ResponseFormatJsonSchemaSchema", + "nullable": true + }, + "strict": { + "title": "Strict", + "description": "Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).\n", + "default": false, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "JsonSchema" + }, + "LLMFineTuneEvent": { + "properties": { + "timestamp": { + "title": "Timestamp", + "type": "number", + "nullable": true + }, + "message": { + "type": "string", + "title": "Message" + }, + "level": { + "type": "string", + "title": "Level" + } + }, + "type": "object", + "required": [ + "message", + "level" + ], + "title": "LLMFineTuneEvent" + }, + "LLMInferenceFramework": { + "type": "string", + "enum": [ + "deepspeed", + "text_generation_inference", + "vllm", + "lightllm", + "tensorrt_llm", + "sglang" + ], + "title": "LLMInferenceFramework" + }, + "LLMSource": { + "type": "string", + "enum": [ + "hugging_face" + ], + "title": "LLMSource" + }, + "ListDockerImageBatchJobBundleV1Response": { + "properties": { + "docker_image_batch_job_bundles": { + "items": { + "$ref": "#/components/schemas/DockerImageBatchJobBundleV1Response" + }, + "type": "array", + "title": "Docker Image Batch Job Bundles" + } + }, + "type": "object", + "required": [ + "docker_image_batch_job_bundles" + ], + "title": "ListDockerImageBatchJobBundleV1Response" + }, + "ListDockerImageBatchJobsV1Response": { + "properties": { + "jobs": { + "items": { + "$ref": "#/components/schemas/DockerImageBatchJob" + }, + "type": "array", + "title": "Jobs" + } + }, + "type": "object", + "required": [ + 
"jobs" + ], + "title": "ListDockerImageBatchJobsV1Response" + }, + "ListFilesResponse": { + "properties": { + "files": { + "items": { + "$ref": "#/components/schemas/GetFileResponse" + }, + "type": "array", + "title": "Files", + "description": "List of file IDs, names, and sizes." + } + }, + "type": "object", + "required": [ + "files" + ], + "title": "ListFilesResponse", + "description": "Response object for listing files." + }, + "ListFineTunesResponse": { + "properties": { + "jobs": { + "items": { + "$ref": "#/components/schemas/GetFineTuneResponse" + }, + "type": "array", + "title": "Jobs" + } + }, + "type": "object", + "required": [ + "jobs" + ], + "title": "ListFineTunesResponse" + }, + "ListLLMModelEndpointsV1Response": { + "properties": { + "model_endpoints": { + "items": { + "$ref": "#/components/schemas/GetLLMModelEndpointV1Response" + }, + "type": "array", + "title": "Model Endpoints" + } + }, + "type": "object", + "required": [ + "model_endpoints" + ], + "title": "ListLLMModelEndpointsV1Response" + }, + "ListModelBundlesV1Response": { + "properties": { + "model_bundles": { + "items": { + "$ref": "#/components/schemas/ModelBundleV1Response" + }, + "type": "array", + "title": "Model Bundles" + } + }, + "type": "object", + "required": [ + "model_bundles" + ], + "title": "ListModelBundlesV1Response", + "description": "Response object for listing Model Bundles." + }, + "ListModelBundlesV2Response": { + "properties": { + "model_bundles": { + "items": { + "$ref": "#/components/schemas/ModelBundleV2Response" + }, + "type": "array", + "title": "Model Bundles" + } + }, + "type": "object", + "required": [ + "model_bundles" + ], + "title": "ListModelBundlesV2Response", + "description": "Response object for listing Model Bundles." 
+ }, + "ListModelEndpointsV1Response": { + "properties": { + "model_endpoints": { + "items": { + "$ref": "#/components/schemas/GetModelEndpointV1Response" + }, + "type": "array", + "title": "Model Endpoints" + } + }, + "type": "object", + "required": [ + "model_endpoints" + ], + "title": "ListModelEndpointsV1Response" + }, + "ListTriggersV1Response": { + "properties": { + "triggers": { + "items": { + "$ref": "#/components/schemas/GetTriggerV1Response" + }, + "type": "array", + "title": "Triggers" + } + }, + "type": "object", + "required": [ + "triggers" + ], + "title": "ListTriggersV1Response" + }, + "Logprobs": { + "properties": { + "content": { + "title": "Content", + "description": "A list of message content tokens with log probability information.", + "items": { + "$ref": "#/components/schemas/ChatCompletionTokenLogprob" + }, + "type": "array", + "nullable": true + }, + "refusal": { + "title": "Refusal", + "description": "A list of message refusal tokens with log probability information.", + "items": { + "$ref": "#/components/schemas/ChatCompletionTokenLogprob" + }, + "type": "array", + "nullable": true + } + }, + "type": "object", + "required": [ + "content", + "refusal" + ], + "title": "Logprobs" + }, + "Logprobs2": { + "properties": { + "text_offset": { + "title": "Text Offset", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "token_logprobs": { + "title": "Token Logprobs", + "items": { + "type": "number" + }, + "type": "array", + "nullable": true + }, + "tokens": { + "title": "Tokens", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "top_logprobs": { + "title": "Top Logprobs", + "items": { + "additionalProperties": { + "type": "number" + }, + "type": "object" + }, + "type": "array", + "nullable": true + } + }, + "type": "object", + "title": "Logprobs2" + }, + "Metadata": { + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, 
+ "ModelBundleEnvironmentParams": { + "properties": { + "framework_type": { + "$ref": "#/components/schemas/ModelBundleFrameworkType" + }, + "pytorch_image_tag": { + "title": "Pytorch Image Tag", + "type": "string", + "nullable": true + }, + "tensorflow_version": { + "title": "Tensorflow Version", + "type": "string", + "nullable": true + }, + "ecr_repo": { + "title": "Ecr Repo", + "type": "string", + "nullable": true + }, + "image_tag": { + "title": "Image Tag", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "framework_type" + ], + "title": "ModelBundleEnvironmentParams", + "description": "This is the entity-layer class for the Model Bundle environment parameters. Being an\nentity-layer class, it should be a plain data object." + }, + "ModelBundleFrameworkType": { + "type": "string", + "enum": [ + "pytorch", + "tensorflow", + "custom_base_image" + ], + "title": "ModelBundleFrameworkType", + "description": "The canonical list of possible machine learning frameworks of Model Bundles." + }, + "ModelBundleOrderBy": { + "type": "string", + "enum": [ + "newest", + "oldest" + ], + "title": "ModelBundleOrderBy", + "description": "The canonical list of possible orderings of Model Bundles." + }, + "ModelBundlePackagingType": { + "type": "string", + "enum": [ + "cloudpickle", + "zip", + "lira" + ], + "title": "ModelBundlePackagingType", + "description": "The canonical list of possible packaging types for Model Bundles.\n\nThese values broadly determine how the model endpoint will obtain its code & dependencies." 
+ }, + "ModelBundleV1Response": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "location": { + "type": "string", + "title": "Location" + }, + "requirements": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Requirements" + }, + "env_params": { + "$ref": "#/components/schemas/ModelBundleEnvironmentParams" + }, + "packaging_type": { + "$ref": "#/components/schemas/ModelBundlePackagingType" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "app_config": { + "title": "App Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "model_artifact_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Model Artifact Ids" + }, + "schema_location": { + "title": "Schema Location", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "id", + "name", + "location", + "requirements", + "env_params", + "packaging_type", + "metadata", + "created_at", + "model_artifact_ids" + ], + "title": "ModelBundleV1Response", + "description": "Response object for a single Model Bundle." 
+ }, + "ModelBundleV2Response": { + "properties": { + "id": { + "type": "string", + "title": "Id" + }, + "name": { + "type": "string", + "title": "Name" + }, + "metadata": { + "additionalProperties": true, + "type": "object", + "title": "Metadata" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Created At" + }, + "model_artifact_ids": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Model Artifact Ids" + }, + "schema_location": { + "title": "Schema Location", + "type": "string", + "nullable": true + }, + "flavor": { + "oneOf": [ + { + "$ref": "#/components/schemas/CloudpickleArtifactFlavor" + }, + { + "$ref": "#/components/schemas/ZipArtifactFlavor" + }, + { + "$ref": "#/components/schemas/RunnableImageFlavor" + }, + { + "$ref": "#/components/schemas/StreamingEnhancedRunnableImageFlavor" + }, + { + "$ref": "#/components/schemas/TritonEnhancedRunnableImageFlavor" + } + ], + "title": "Flavor", + "discriminator": { + "propertyName": "flavor", + "mapping": { + "cloudpickle_artifact": "#/components/schemas/CloudpickleArtifactFlavor", + "runnable_image": "#/components/schemas/RunnableImageFlavor", + "streaming_enhanced_runnable_image": "#/components/schemas/StreamingEnhancedRunnableImageFlavor", + "triton_enhanced_runnable_image": "#/components/schemas/TritonEnhancedRunnableImageFlavor", + "zip_artifact": "#/components/schemas/ZipArtifactFlavor" + } + } + } + }, + "type": "object", + "required": [ + "id", + "name", + "metadata", + "created_at", + "model_artifact_ids", + "flavor" + ], + "title": "ModelBundleV2Response", + "description": "Response object for a single Model Bundle." + }, + "ModelDownloadRequest": { + "properties": { + "model_name": { + "type": "string", + "title": "Model Name", + "description": "Name of the fine tuned model" + }, + "download_format": { + "title": "Download Format", + "description": "Format that you want the downloaded urls to be compatible with. 
Currently only supports hugging_face", + "default": "hugging_face", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "model_name" + ], + "title": "ModelDownloadRequest" + }, + "ModelDownloadResponse": { + "properties": { + "urls": { + "additionalProperties": { + "type": "string" + }, + "type": "object", + "title": "Urls", + "description": "Dictionary of (file_name, url) pairs to download the model from." + } + }, + "type": "object", + "required": [ + "urls" + ], + "title": "ModelDownloadResponse" + }, + "ModelEndpointDeploymentState": { + "properties": { + "min_workers": { + "type": "integer", + "minimum": 0.0, + "title": "Min Workers" + }, + "max_workers": { + "type": "integer", + "minimum": 0.0, + "title": "Max Workers" + }, + "per_worker": { + "type": "integer", + "exclusiveMinimum": 0.0, + "title": "Per Worker" + }, + "concurrent_requests_per_worker": { + "type": "integer", + "exclusiveMinimum": 0.0, + "title": "Concurrent Requests Per Worker" + }, + "available_workers": { + "title": "Available Workers", + "type": "integer", + "minimum": 0.0, + "nullable": true + }, + "unavailable_workers": { + "title": "Unavailable Workers", + "type": "integer", + "minimum": 0.0, + "nullable": true + } + }, + "type": "object", + "required": [ + "min_workers", + "max_workers", + "per_worker", + "concurrent_requests_per_worker" + ], + "title": "ModelEndpointDeploymentState", + "description": "This is the entity-layer class for the deployment settings related to a Model Endpoint." + }, + "ModelEndpointOrderBy": { + "type": "string", + "enum": [ + "newest", + "oldest", + "alphabetical" + ], + "title": "ModelEndpointOrderBy", + "description": "The canonical list of possible orderings of Model Endpoints." 
+ }, + "ModelEndpointResourceState": { + "properties": { + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus" + }, + "gpus": { + "type": "integer", + "minimum": 0.0, + "title": "Gpus" + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory" + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "type": "integer", + "minimum": 1.0, + "title": "Nodes Per Worker" + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "cpus", + "gpus", + "memory", + "nodes_per_worker" + ], + "title": "ModelEndpointResourceState", + "description": "This is the entity-layer class for the resource settings per worker of a Model Endpoint.\nNote: in the multinode case, there are multiple \"nodes\" per \"worker\".\n\"Nodes\" is analogous to a single k8s pod that may take up all the GPUs on a single machine.\n\"Workers\" is the smallest unit that a request can be made to, and consists of one leader \"node\" and\nmultiple follower \"nodes\" (named \"worker\" in the k8s LeaderWorkerSet definition).\ncpus/gpus/memory/storage are per-node, thus the total consumption by a \"worker\"\nis cpus/gpus/etc. multiplied by nodes_per_worker." 
+ }, + "ModelEndpointStatus": { + "type": "string", + "enum": [ + "READY", + "UPDATE_PENDING", + "UPDATE_IN_PROGRESS", + "UPDATE_FAILED", + "DELETE_IN_PROGRESS" + ], + "title": "ModelEndpointStatus" + }, + "ModelEndpointType": { + "type": "string", + "enum": [ + "async", + "sync", + "streaming" + ], + "title": "ModelEndpointType" + }, + "ParallelToolCalls": { + "type": "boolean", + "title": "ParallelToolCalls", + "description": "Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use." + }, + "PredictionContent": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of the predicted content you want to provide. This type is\ncurrently always `content`.\n", + "enum": [ + "content" + ] + }, + "content": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/Content8" + } + ], + "title": "Content", + "description": "The content that should be matched when generating a model response.\nIf generated tokens would match this content, the entire model response\ncan be returned much more quickly.\n" + } + }, + "type": "object", + "required": [ + "type", + "content" + ], + "title": "PredictionContent" + }, + "Prompt": { + "title": "Prompt", + "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.\n", + "items": { + "type": "integer" + }, + "type": "array", + "minItems": 1, + "nullable": true + }, + "Prompt1": { + "title": "Prompt1", + "description": "The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.\n\nNote that <|endoftext|> is the document separator that the model sees during training, so 
if a prompt is not specified the model will generate as if from the beginning of a new document.\n", + "items": { + "$ref": "#/components/schemas/Prompt1Item" + }, + "type": "array", + "minItems": 1, + "nullable": true + }, + "Prompt1Item": { + "items": { + "type": "integer" + }, + "type": "array", + "minItems": 1, + "title": "Prompt1Item" + }, + "PromptTokensDetails": { + "properties": { + "audio_tokens": { + "type": "integer", + "title": "Audio Tokens", + "description": "Audio input tokens present in the prompt.", + "default": 0 + }, + "cached_tokens": { + "type": "integer", + "title": "Cached Tokens", + "description": "Cached tokens present in the prompt.", + "default": 0 + } + }, + "type": "object", + "title": "PromptTokensDetails" + }, + "PytorchFramework": { + "properties": { + "framework_type": { + "type": "string", + "title": "Framework Type", + "enum": [ + "pytorch" + ] + }, + "pytorch_image_tag": { + "type": "string", + "title": "Pytorch Image Tag" + } + }, + "type": "object", + "required": [ + "framework_type", + "pytorch_image_tag" + ], + "title": "PytorchFramework", + "description": "This is the entity-layer class for a Pytorch framework specification." + }, + "Quantization": { + "type": "string", + "enum": [ + "bitsandbytes", + "awq" + ], + "title": "Quantization" + }, + "ReasoningEffort": { + "title": "ReasoningEffort", + "description": "**o-series models only** \n\nConstrains effort on reasoning for \n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `low`, `medium`, and `high`. 
Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response.\n", + "default": "medium", + "type": "string", + "enum": [ + "low", + "medium", + "high" + ], + "nullable": true + }, + "RequestSchema": { + "title": "RequestSchema" + }, + "ResponseFormatJsonObject": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of response format being defined. Always `json_object`.", + "enum": [ + "json_object" + ] + } + }, + "type": "object", + "required": [ + "type" + ], + "title": "ResponseFormatJsonObject" + }, + "ResponseFormatJsonSchema": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of response format being defined. Always `json_schema`.", + "enum": [ + "json_schema" + ] + }, + "json_schema": { + "$ref": "#/components/schemas/JsonSchema", + "title": "JSON schema", + "description": "Structured Outputs configuration options, including a JSON Schema.\n" + } + }, + "type": "object", + "required": [ + "type", + "json_schema" + ], + "title": "ResponseFormatJsonSchema" + }, + "ResponseFormatJsonSchemaSchema": { + "properties": {}, + "additionalProperties": true, + "type": "object", + "title": "ResponseFormatJsonSchemaSchema" + }, + "ResponseFormatText": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of response format being defined. Always `text`.", + "enum": [ + "text" + ] + } + }, + "type": "object", + "required": [ + "type" + ], + "title": "ResponseFormatText" + }, + "ResponseModalities": { + "title": "ResponseModalities", + "description": "Output types that you would like the model to generate.\nMost models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to \n[generate audio](/docs/guides/audio). 
To request that this model generate \nboth text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`\n", + "items": { + "type": "string", + "enum": [ + "text", + "audio" + ] + }, + "type": "array", + "nullable": true + }, + "ResponseSchema": { + "title": "ResponseSchema" + }, + "RestartModelEndpointV1Response": { + "properties": { + "restarted": { + "type": "boolean", + "title": "Restarted" + } + }, + "type": "object", + "required": [ + "restarted" + ], + "title": "RestartModelEndpointV1Response" + }, + "RunnableImageFlavor": { + "properties": { + "repository": { + "type": "string", + "title": "Repository" + }, + "tag": { + "type": "string", + "title": "Tag" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Command" + }, + "predict_route": { + "type": "string", + "title": "Predict Route", + "default": "/predict" + }, + "healthcheck_route": { + "type": "string", + "title": "Healthcheck Route", + "default": "/readyz" + }, + "env": { + "title": "Env", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "protocol": { + "type": "string", + "title": "Protocol", + "enum": [ + "http" + ] + }, + "readiness_initial_delay_seconds": { + "type": "integer", + "title": "Readiness Initial Delay Seconds", + "default": 120 + }, + "extra_routes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Extra Routes" + }, + "routes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Routes" + }, + "forwarder_type": { + "title": "Forwarder Type", + "default": "default", + "type": "string", + "nullable": true + }, + "worker_command": { + "title": "Worker Command", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "worker_env": { + "title": "Worker Env", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "flavor": { + "type": "string", + "title": "Flavor", + "enum": [ + "runnable_image" 
+ ] + } + }, + "type": "object", + "required": [ + "repository", + "tag", + "command", + "protocol", + "flavor" + ], + "title": "RunnableImageFlavor", + "description": "This is the entity-layer class for the Model Bundle flavor of a runnable image." + }, + "ServiceTier": { + "title": "ServiceTier", + "description": "Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:\n - If set to 'auto', and the Project is Scale tier enabled, the system\n will utilize scale tier credits until they are exhausted.\n - If set to 'auto', and the Project is not Scale tier enabled, the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.\n - If set to 'flex', the request will be processed with the Flex Processing service tier. [Learn more](/docs/guides/flex-processing).\n - When not set, the default behavior is 'auto'.\n\n When this parameter is set, the response body will include the `service_tier` utilized.\n", + "default": "auto", + "type": "string", + "enum": [ + "auto", + "default", + "flex" + ], + "nullable": true + }, + "StopConfiguration": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/components/schemas/StopConfiguration1" + } + ], + "title": "StopConfiguration", + "description": "Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence.\n", + "nullable": true + }, + "StopConfiguration1": { + "title": "StopConfiguration1", + "description": "Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. 
The\nreturned text will not contain the stop sequence.\n", + "items": { + "type": "string" + }, + "type": "array", + "maxItems": 4, + "minItems": 1, + "nullable": true + }, + "StreamError": { + "properties": { + "status_code": { + "type": "integer", + "title": "Status Code" + }, + "content": { + "$ref": "#/components/schemas/StreamErrorContent" + } + }, + "type": "object", + "required": [ + "status_code", + "content" + ], + "title": "StreamError", + "description": "Error object for a stream prompt completion task." + }, + "StreamErrorContent": { + "properties": { + "error": { + "type": "string", + "title": "Error" + }, + "timestamp": { + "type": "string", + "title": "Timestamp" + } + }, + "type": "object", + "required": [ + "error", + "timestamp" + ], + "title": "StreamErrorContent" + }, + "StreamingEnhancedRunnableImageFlavor": { + "properties": { + "repository": { + "type": "string", + "title": "Repository" + }, + "tag": { + "type": "string", + "title": "Tag" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Command", + "default": [] + }, + "predict_route": { + "type": "string", + "title": "Predict Route", + "default": "/predict" + }, + "healthcheck_route": { + "type": "string", + "title": "Healthcheck Route", + "default": "/readyz" + }, + "env": { + "title": "Env", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "protocol": { + "type": "string", + "title": "Protocol", + "enum": [ + "http" + ] + }, + "readiness_initial_delay_seconds": { + "type": "integer", + "title": "Readiness Initial Delay Seconds", + "default": 120 + }, + "extra_routes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Extra Routes" + }, + "routes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Routes" + }, + "forwarder_type": { + "title": "Forwarder Type", + "default": "default", + "type": "string", + "nullable": true + }, + "worker_command": { + "title": 
"Worker Command", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "worker_env": { + "title": "Worker Env", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "flavor": { + "type": "string", + "title": "Flavor", + "enum": [ + "streaming_enhanced_runnable_image" + ] + }, + "streaming_command": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Streaming Command" + }, + "streaming_predict_route": { + "type": "string", + "title": "Streaming Predict Route", + "default": "/stream" + } + }, + "type": "object", + "required": [ + "repository", + "tag", + "protocol", + "flavor", + "streaming_command" + ], + "title": "StreamingEnhancedRunnableImageFlavor", + "description": "For deployments that expose a streaming route in a container." + }, + "SyncEndpointPredictV1Request": { + "properties": { + "url": { + "title": "Url", + "type": "string", + "nullable": true + }, + "args": { + "$ref": "#/components/schemas/RequestSchema", + "nullable": true + }, + "cloudpickle": { + "title": "Cloudpickle", + "type": "string", + "nullable": true + }, + "callback_url": { + "title": "Callback Url", + "type": "string", + "nullable": true + }, + "callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "return_pickled": { + "type": "boolean", + "title": "Return Pickled", + "default": false + }, + "destination_path": { + "title": "Destination Path", + "type": "string", + "nullable": true + }, + "timeout_seconds": { + "title": "Timeout Seconds", + "type": "number", + "exclusiveMinimum": 0.0, + "nullable": true + }, + "num_retries": { + "title": "Num Retries", + "type": "integer", + "minimum": 0.0, + "nullable": true + } + }, + "type": "object", + "title": "SyncEndpointPredictV1Request" + }, + "SyncEndpointPredictV1Response": { + "properties": { + "status": { + "$ref": "#/components/schemas/TaskStatus" + }, + "result": { + "title": "Result", + "nullable": true + }, + 
"traceback": { + "title": "Traceback", + "type": "string", + "nullable": true + }, + "status_code": { + "title": "Status Code", + "type": "integer", + "nullable": true + } + }, + "type": "object", + "required": [ + "status" + ], + "title": "SyncEndpointPredictV1Response" + }, + "TaskStatus": { + "type": "string", + "enum": [ + "PENDING", + "STARTED", + "SUCCESS", + "FAILURE", + "UNDEFINED" + ], + "title": "TaskStatus" + }, + "TensorflowFramework": { + "properties": { + "framework_type": { + "type": "string", + "title": "Framework Type", + "enum": [ + "tensorflow" + ] + }, + "tensorflow_version": { + "type": "string", + "title": "Tensorflow Version" + } + }, + "type": "object", + "required": [ + "framework_type", + "tensorflow_version" + ], + "title": "TensorflowFramework", + "description": "This is the entity-layer class for a Tensorflow framework specification." + }, + "TokenOutput": { + "properties": { + "token": { + "type": "string", + "title": "Token" + }, + "log_prob": { + "type": "number", + "title": "Log Prob" + } + }, + "type": "object", + "required": [ + "token", + "log_prob" + ], + "title": "TokenOutput", + "description": "Detailed token information." + }, + "ToolConfig": { + "properties": { + "name": { + "type": "string", + "title": "Name" + }, + "max_iterations": { + "title": "Max Iterations", + "default": 10, + "type": "integer", + "nullable": true + }, + "execution_timeout_seconds": { + "title": "Execution Timeout Seconds", + "default": 60, + "type": "integer", + "nullable": true + }, + "should_retry_on_error": { + "title": "Should Retry On Error", + "default": true, + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "required": [ + "name" + ], + "title": "ToolConfig", + "description": "Configuration for tool use.\nNOTE: this config is highly experimental and signature will change significantly in future iterations." 
+ }, + "TopLogprob": { + "properties": { + "token": { + "type": "string", + "title": "Token", + "description": "The token." + }, + "logprob": { + "type": "number", + "title": "Logprob", + "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." + }, + "bytes": { + "title": "Bytes", + "description": "A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + } + }, + "type": "object", + "required": [ + "token", + "logprob", + "bytes" + ], + "title": "TopLogprob" + }, + "TritonEnhancedRunnableImageFlavor": { + "properties": { + "repository": { + "type": "string", + "title": "Repository" + }, + "tag": { + "type": "string", + "title": "Tag" + }, + "command": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Command" + }, + "predict_route": { + "type": "string", + "title": "Predict Route", + "default": "/predict" + }, + "healthcheck_route": { + "type": "string", + "title": "Healthcheck Route", + "default": "/readyz" + }, + "env": { + "title": "Env", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "protocol": { + "type": "string", + "title": "Protocol", + "enum": [ + "http" + ] + }, + "readiness_initial_delay_seconds": { + "type": "integer", + "title": "Readiness Initial Delay Seconds", + "default": 120 + }, + "extra_routes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Extra Routes" + }, + "routes": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Routes" + }, + "forwarder_type": { + "title": "Forwarder Type", + 
"default": "default", + "type": "string", + "nullable": true + }, + "worker_command": { + "title": "Worker Command", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "worker_env": { + "title": "Worker Env", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "flavor": { + "type": "string", + "title": "Flavor", + "enum": [ + "triton_enhanced_runnable_image" + ] + }, + "triton_model_repository": { + "type": "string", + "title": "Triton Model Repository" + }, + "triton_model_replicas": { + "title": "Triton Model Replicas", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "triton_num_cpu": { + "type": "number", + "title": "Triton Num Cpu" + }, + "triton_commit_tag": { + "type": "string", + "title": "Triton Commit Tag" + }, + "triton_storage": { + "title": "Triton Storage", + "type": "string", + "nullable": true + }, + "triton_memory": { + "title": "Triton Memory", + "type": "string", + "nullable": true + }, + "triton_readiness_initial_delay_seconds": { + "type": "integer", + "title": "Triton Readiness Initial Delay Seconds", + "default": 300 + } + }, + "type": "object", + "required": [ + "repository", + "tag", + "command", + "protocol", + "flavor", + "triton_model_repository", + "triton_num_cpu", + "triton_commit_tag" + ], + "title": "TritonEnhancedRunnableImageFlavor", + "description": "For deployments that require tritonserver running in a container." + }, + "UpdateBatchCompletionsV2Request": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id", + "description": "ID of the batch completions job" + }, + "priority": { + "title": "Priority", + "description": "Priority of the batch inference job. 
Default to None.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "required": [ + "job_id" + ], + "title": "UpdateBatchCompletionsV2Request" + }, + "UpdateBatchCompletionsV2Response": { + "properties": { + "job_id": { + "type": "string", + "title": "Job Id" + }, + "input_data_path": { + "title": "Input Data Path", + "description": "Path to the input file. The input file should be a JSON file of type List[CreateBatchCompletionsRequestContent].", + "type": "string", + "nullable": true + }, + "output_data_path": { + "type": "string", + "title": "Output Data Path", + "description": "Path to the output file. The output file will be a JSON file of type List[CompletionOutput]." + }, + "model_config": { + "$ref": "#/components/schemas/BatchCompletionsModelConfig", + "description": "Model configuration for the batch inference. Hardware configurations are inferred." + }, + "priority": { + "title": "Priority", + "description": "Priority of the batch inference job. Default to None.", + "type": "string", + "nullable": true + }, + "status": { + "$ref": "#/components/schemas/BatchCompletionsJobStatus" + }, + "created_at": { + "type": "string", + "title": "Created At" + }, + "expires_at": { + "type": "string", + "title": "Expires At" + }, + "completed_at": { + "title": "Completed At", + "type": "string", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "success": { + "type": "boolean", + "title": "Success", + "description": "Whether the update was successful" + } + }, + "type": "object", + "required": [ + "job_id", + "output_data_path", + "model_config", + "status", + "created_at", + "expires_at", + "completed_at", + "metadata", + "success" + ], + "title": "UpdateBatchCompletionsV2Response" + }, + "UpdateBatchJobV1Request": { + "properties": { + "cancel": { + "type": "boolean", + "title": "Cancel" + } + }, + "type": "object", + "required": [ + 
"cancel" + ], + "title": "UpdateBatchJobV1Request" + }, + "UpdateBatchJobV1Response": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "UpdateBatchJobV1Response" + }, + "UpdateDeepSpeedModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + 
"public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "model_name": { + "title": "Model Name", + "type": "string", + "nullable": true + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "nullable": true + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "deepspeed", + "enum": [ + "deepspeed" + ] + }, + "inference_framework_image_tag": { + "title": "Inference Framework Image Tag", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "type": "integer", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "force_bundle_recreation": { + "title": "Force Bundle Recreation", + "default": false, + "type": "boolean", + "nullable": true + }, + "min_workers": { + "title": "Min Workers", + "type": "integer", + "nullable": true + }, + "max_workers": { + "title": "Max Workers", + "type": "integer", + "nullable": true + }, + "per_worker": { + "title": "Per Worker", + "type": "integer", + "nullable": true + }, + "labels": { + "title": "Labels", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + } + }, + "type": "object", + "title": "UpdateDeepSpeedModelEndpointRequest" + }, + "UpdateDockerImageBatchJobV1Request": { + "properties": { + "cancel": { + "type": "boolean", + 
"title": "Cancel" + } + }, + "type": "object", + "required": [ + "cancel" + ], + "title": "UpdateDockerImageBatchJobV1Request" + }, + "UpdateDockerImageBatchJobV1Response": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "UpdateDockerImageBatchJobV1Response" + }, + "UpdateLLMModelEndpointV1Response": { + "properties": { + "endpoint_creation_task_id": { + "type": "string", + "title": "Endpoint Creation Task Id" + } + }, + "type": "object", + "required": [ + "endpoint_creation_task_id" + ], + "title": "UpdateLLMModelEndpointV1Response" + }, + "UpdateModelEndpointV1Request": { + "properties": { + "model_bundle_id": { + "title": "Model Bundle Id", + "type": "string", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "minimum": 0.0, + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "min_workers": { + "title": "Min Workers", + "type": "integer", + "minimum": 0.0, + "nullable": true + }, + "max_workers": { + "title": "Max Workers", + "type": "integer", + "minimum": 
0.0, + "nullable": true + }, + "per_worker": { + "title": "Per Worker", + "type": "integer", + "exclusiveMinimum": 0.0, + "nullable": true + }, + "concurrent_requests_per_worker": { + "title": "Concurrent Requests Per Worker", + "type": "integer", + "exclusiveMinimum": 0.0, + "nullable": true + }, + "labels": { + "title": "Labels", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "title": "UpdateModelEndpointV1Request" + }, + "UpdateModelEndpointV1Response": { + "properties": { + "endpoint_creation_task_id": { + "type": "string", + "title": "Endpoint Creation Task Id" + } + }, + "type": "object", + "required": [ + "endpoint_creation_task_id" + ], + "title": "UpdateModelEndpointV1Response" + }, + "UpdateSGLangModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, 
+ "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "model_name": { + "title": "Model Name", + "type": "string", + "nullable": true + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "nullable": true + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "sglang", + "enum": [ + "sglang" + ] + }, + "inference_framework_image_tag": { + "title": "Inference Framework Image Tag", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "type": "integer", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "force_bundle_recreation": { + "title": "Force Bundle Recreation", + "default": false, + "type": "boolean", + "nullable": true + }, + "min_workers": { + "title": "Min Workers", + "type": "integer", + "nullable": true + }, + "max_workers": { + "title": "Max Workers", + "type": "integer", + "nullable": true + }, + "per_worker": { + "title": "Per Worker", + "type": "integer", + "nullable": true + }, + "labels": { + "title": "Labels", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "trust_remote_code": { + "title": "Trust Remote Code", + "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). 
Default to False.", + "default": false, + "type": "boolean", + "nullable": true + }, + "tp_size": { + "title": "Tp Size", + "description": "The tensor parallel size.", + "type": "integer", + "nullable": true + }, + "skip_tokenizer_init": { + "title": "Skip Tokenizer Init", + "description": "If set, skip init tokenizer and pass input_ids in generate request", + "type": "boolean", + "nullable": true + }, + "load_format": { + "title": "Load Format", + "description": "The format of the model weights to load.", + "type": "string", + "nullable": true + }, + "dtype": { + "title": "Dtype", + "description": "Data type for model weights and activations.", + "type": "string", + "nullable": true + }, + "kv_cache_dtype": { + "title": "Kv Cache Dtype", + "description": "Data type for kv cache storage. \"auto\" will use model data type.", + "type": "string", + "nullable": true + }, + "quantization_param_path": { + "title": "Quantization Param Path", + "description": "Path to the JSON file containing the KV cache scaling factors.", + "type": "string", + "nullable": true + }, + "quantization": { + "title": "Quantization", + "description": "The quantization method.", + "type": "string", + "nullable": true + }, + "context_length": { + "title": "Context Length", + "description": "The model's maximum context length.", + "type": "integer", + "nullable": true + }, + "device": { + "title": "Device", + "description": "The device type.", + "type": "string", + "nullable": true + }, + "served_model_name": { + "title": "Served Model Name", + "description": "Override the model name returned by the v1/models endpoint in OpenAI API server.", + "type": "string", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "The builtin chat template name or path of the chat template file.", + "type": "string", + "nullable": true + }, + "is_embedding": { + "title": "Is Embedding", + "description": "Whether to use a CausalLM as an embedding model.", + "type": "boolean", + 
"nullable": true + }, + "revision": { + "title": "Revision", + "description": "The specific model version to use.", + "type": "string", + "nullable": true + }, + "mem_fraction_static": { + "title": "Mem Fraction Static", + "description": "The fraction of the memory used for static allocation.", + "type": "number", + "nullable": true + }, + "max_running_requests": { + "title": "Max Running Requests", + "description": "The maximum number of running requests.", + "type": "integer", + "nullable": true + }, + "max_total_tokens": { + "title": "Max Total Tokens", + "description": "The maximum number of tokens in the memory pool.", + "type": "integer", + "nullable": true + }, + "chunked_prefill_size": { + "title": "Chunked Prefill Size", + "description": "The maximum number of tokens in a chunk for the chunked prefill.", + "type": "integer", + "nullable": true + }, + "max_prefill_tokens": { + "title": "Max Prefill Tokens", + "description": "The maximum number of tokens in a prefill batch.", + "type": "integer", + "nullable": true + }, + "schedule_policy": { + "title": "Schedule Policy", + "description": "The scheduling policy of the requests.", + "type": "string", + "nullable": true + }, + "schedule_conservativeness": { + "title": "Schedule Conservativeness", + "description": "How conservative the schedule policy is.", + "type": "number", + "nullable": true + }, + "cpu_offload_gb": { + "title": "Cpu Offload Gb", + "description": "How many GBs of RAM to reserve for CPU offloading", + "type": "integer", + "nullable": true + }, + "prefill_only_one_req": { + "title": "Prefill Only One Req", + "description": "If true, we only prefill one request at one prefill batch", + "type": "boolean", + "nullable": true + }, + "stream_interval": { + "title": "Stream Interval", + "description": "The interval for streaming in terms of the token length.", + "type": "integer", + "nullable": true + }, + "random_seed": { + "title": "Random Seed", + "description": "The random seed.", + "type": 
"integer", + "nullable": true + }, + "constrained_json_whitespace_pattern": { + "title": "Constrained Json Whitespace Pattern", + "description": "Regex pattern for syntactic whitespaces allowed in JSON constrained output.", + "type": "string", + "nullable": true + }, + "watchdog_timeout": { + "title": "Watchdog Timeout", + "description": "Set watchdog timeout in seconds.", + "type": "number", + "nullable": true + }, + "download_dir": { + "title": "Download Dir", + "description": "Model download directory.", + "type": "string", + "nullable": true + }, + "base_gpu_id": { + "title": "Base Gpu Id", + "description": "The base GPU ID to start allocating GPUs from.", + "type": "integer", + "nullable": true + }, + "log_level": { + "title": "Log Level", + "description": "The logging level of all loggers.", + "type": "string", + "nullable": true + }, + "log_level_http": { + "title": "Log Level Http", + "description": "The logging level of HTTP server.", + "type": "string", + "nullable": true + }, + "log_requests": { + "title": "Log Requests", + "description": "Log the inputs and outputs of all requests.", + "type": "boolean", + "nullable": true + }, + "show_time_cost": { + "title": "Show Time Cost", + "description": "Show time cost of custom marks.", + "type": "boolean", + "nullable": true + }, + "enable_metrics": { + "title": "Enable Metrics", + "description": "Enable log prometheus metrics.", + "type": "boolean", + "nullable": true + }, + "decode_log_interval": { + "title": "Decode Log Interval", + "description": "The log interval of decode batch.", + "type": "integer", + "nullable": true + }, + "api_key": { + "title": "Api Key", + "description": "Set API key of the server.", + "type": "string", + "nullable": true + }, + "file_storage_pth": { + "title": "File Storage Pth", + "description": "The path of the file storage in backend.", + "type": "string", + "nullable": true + }, + "enable_cache_report": { + "title": "Enable Cache Report", + "description": "Return number of 
cached tokens in usage.prompt_tokens_details.", + "type": "boolean", + "nullable": true + }, + "data_parallel_size": { + "title": "Data Parallel Size", + "description": "The data parallelism size.", + "type": "integer", + "nullable": true + }, + "load_balance_method": { + "title": "Load Balance Method", + "description": "The load balancing strategy for data parallelism.", + "type": "string", + "nullable": true + }, + "expert_parallel_size": { + "title": "Expert Parallel Size", + "description": "The expert parallelism size.", + "type": "integer", + "nullable": true + }, + "dist_init_addr": { + "title": "Dist Init Addr", + "description": "The host address for initializing distributed backend.", + "type": "string", + "nullable": true + }, + "nnodes": { + "title": "Nnodes", + "description": "The number of nodes.", + "type": "integer", + "nullable": true + }, + "node_rank": { + "title": "Node Rank", + "description": "The node rank.", + "type": "integer", + "nullable": true + }, + "json_model_override_args": { + "title": "Json Model Override Args", + "description": "A dictionary in JSON string format used to override default model configurations.", + "type": "string", + "nullable": true + }, + "lora_paths": { + "title": "Lora Paths", + "description": "The list of LoRA adapters.", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "max_loras_per_batch": { + "title": "Max Loras Per Batch", + "description": "Maximum number of adapters for a running batch.", + "type": "integer", + "nullable": true + }, + "attention_backend": { + "title": "Attention Backend", + "description": "Choose the kernels for attention layers.", + "type": "string", + "nullable": true + }, + "sampling_backend": { + "title": "Sampling Backend", + "description": "Choose the kernels for sampling layers.", + "type": "string", + "nullable": true + }, + "grammar_backend": { + "title": "Grammar Backend", + "description": "Choose the backend for grammar-guided decoding.", + 
"type": "string", + "nullable": true + }, + "speculative_algorithm": { + "title": "Speculative Algorithm", + "description": "Speculative algorithm.", + "type": "string", + "nullable": true + }, + "speculative_draft_model_path": { + "title": "Speculative Draft Model Path", + "description": "The path of the draft model weights.", + "type": "string", + "nullable": true + }, + "speculative_num_steps": { + "title": "Speculative Num Steps", + "description": "The number of steps sampled from draft model in Speculative Decoding.", + "type": "integer", + "nullable": true + }, + "speculative_num_draft_tokens": { + "title": "Speculative Num Draft Tokens", + "description": "The number of token sampled from draft model in Speculative Decoding.", + "type": "integer", + "nullable": true + }, + "speculative_eagle_topk": { + "title": "Speculative Eagle Topk", + "description": "The number of token sampled from draft model in eagle2 each step.", + "type": "integer", + "nullable": true + }, + "enable_double_sparsity": { + "title": "Enable Double Sparsity", + "description": "Enable double sparsity attention", + "type": "boolean", + "nullable": true + }, + "ds_channel_config_path": { + "title": "Ds Channel Config Path", + "description": "The path of the double sparsity channel config", + "type": "string", + "nullable": true + }, + "ds_heavy_channel_num": { + "title": "Ds Heavy Channel Num", + "description": "The number of heavy channels in double sparsity attention", + "type": "integer", + "nullable": true + }, + "ds_heavy_token_num": { + "title": "Ds Heavy Token Num", + "description": "The number of heavy tokens in double sparsity attention", + "type": "integer", + "nullable": true + }, + "ds_heavy_channel_type": { + "title": "Ds Heavy Channel Type", + "description": "The type of heavy channels in double sparsity attention", + "type": "string", + "nullable": true + }, + "ds_sparse_decode_threshold": { + "title": "Ds Sparse Decode Threshold", + "description": "The threshold for sparse 
decoding in double sparsity attention", + "type": "integer", + "nullable": true + }, + "disable_radix_cache": { + "title": "Disable Radix Cache", + "description": "Disable RadixAttention for prefix caching.", + "type": "boolean", + "nullable": true + }, + "disable_jump_forward": { + "title": "Disable Jump Forward", + "description": "Disable jump-forward for grammar-guided decoding.", + "type": "boolean", + "nullable": true + }, + "disable_cuda_graph": { + "title": "Disable Cuda Graph", + "description": "Disable cuda graph.", + "type": "boolean", + "nullable": true + }, + "disable_cuda_graph_padding": { + "title": "Disable Cuda Graph Padding", + "description": "Disable cuda graph when padding is needed.", + "type": "boolean", + "nullable": true + }, + "disable_outlines_disk_cache": { + "title": "Disable Outlines Disk Cache", + "description": "Disable disk cache of outlines.", + "type": "boolean", + "nullable": true + }, + "disable_custom_all_reduce": { + "title": "Disable Custom All Reduce", + "description": "Disable the custom all-reduce kernel.", + "type": "boolean", + "nullable": true + }, + "disable_mla": { + "title": "Disable Mla", + "description": "Disable Multi-head Latent Attention (MLA) for DeepSeek-V2.", + "type": "boolean", + "nullable": true + }, + "disable_overlap_schedule": { + "title": "Disable Overlap Schedule", + "description": "Disable the overlap scheduler.", + "type": "boolean", + "nullable": true + }, + "enable_mixed_chunk": { + "title": "Enable Mixed Chunk", + "description": "Enable mixing prefill and decode in a batch when using chunked prefill.", + "type": "boolean", + "nullable": true + }, + "enable_dp_attention": { + "title": "Enable Dp Attention", + "description": "Enable data parallelism for attention and tensor parallelism for FFN.", + "type": "boolean", + "nullable": true + }, + "enable_ep_moe": { + "title": "Enable Ep Moe", + "description": "Enable expert parallelism for moe.", + "type": "boolean", + "nullable": true + }, + 
"enable_torch_compile": { + "title": "Enable Torch Compile", + "description": "Optimize the model with torch.compile.", + "type": "boolean", + "nullable": true + }, + "torch_compile_max_bs": { + "title": "Torch Compile Max Bs", + "description": "Set the maximum batch size when using torch compile.", + "type": "integer", + "nullable": true + }, + "cuda_graph_max_bs": { + "title": "Cuda Graph Max Bs", + "description": "Set the maximum batch size for cuda graph.", + "type": "integer", + "nullable": true + }, + "cuda_graph_bs": { + "title": "Cuda Graph Bs", + "description": "Set the list of batch sizes for cuda graph.", + "items": { + "type": "integer" + }, + "type": "array", + "nullable": true + }, + "torchao_config": { + "title": "Torchao Config", + "description": "Optimize the model with torchao.", + "type": "string", + "nullable": true + }, + "enable_nan_detection": { + "title": "Enable Nan Detection", + "description": "Enable the NaN detection for debugging purposes.", + "type": "boolean", + "nullable": true + }, + "enable_p2p_check": { + "title": "Enable P2P Check", + "description": "Enable P2P check for GPU access.", + "type": "boolean", + "nullable": true + }, + "triton_attention_reduce_in_fp32": { + "title": "Triton Attention Reduce In Fp32", + "description": "Cast the intermediate attention results to fp32.", + "type": "boolean", + "nullable": true + }, + "triton_attention_num_kv_splits": { + "title": "Triton Attention Num Kv Splits", + "description": "The number of KV splits in flash decoding Triton kernel.", + "type": "integer", + "nullable": true + }, + "num_continuous_decode_steps": { + "title": "Num Continuous Decode Steps", + "description": "Run multiple continuous decoding steps to reduce scheduling overhead.", + "type": "integer", + "nullable": true + }, + "delete_ckpt_after_loading": { + "title": "Delete Ckpt After Loading", + "description": "Delete the model checkpoint after loading the model.", + "type": "boolean", + "nullable": true + }, + 
"enable_memory_saver": { + "title": "Enable Memory Saver", + "description": "Allow saving memory using release_memory_occupation and resume_memory_occupation", + "type": "boolean", + "nullable": true + }, + "allow_auto_truncate": { + "title": "Allow Auto Truncate", + "description": "Allow automatically truncating requests that exceed the maximum input length.", + "type": "boolean", + "nullable": true + }, + "enable_custom_logit_processor": { + "title": "Enable Custom Logit Processor", + "description": "Enable users to pass custom logit processors to the server.", + "type": "boolean", + "nullable": true + }, + "tool_call_parser": { + "title": "Tool Call Parser", + "description": "Specify the parser for handling tool-call interactions.", + "type": "string", + "nullable": true + }, + "huggingface_repo": { + "title": "Huggingface Repo", + "description": "The Hugging Face repository ID.", + "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "UpdateSGLangModelEndpointRequest" + }, + "UpdateTextGenerationInferenceModelEndpointRequest": { + "properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": 
"number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. 
When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "model_name": { + "title": "Model Name", + "type": "string", + "nullable": true + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "nullable": true + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "text_generation_inference", + "enum": [ + "text_generation_inference" + ] + }, + "inference_framework_image_tag": { + "title": "Inference Framework Image Tag", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "type": "integer", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "force_bundle_recreation": { + "title": "Force Bundle Recreation", + "default": false, + "type": "boolean", + "nullable": true + }, + "min_workers": { + "title": "Min Workers", + "type": "integer", + "nullable": true + }, + "max_workers": { + "title": "Max Workers", + "type": "integer", + "nullable": true + }, + "per_worker": { + "title": "Per Worker", + "type": "integer", + "nullable": true + }, + "labels": { + "title": "Labels", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + } + }, + "type": "object", + "title": "UpdateTextGenerationInferenceModelEndpointRequest" + }, + "UpdateTriggerV1Request": { + "properties": { + "cron_schedule": { + "title": "Cron Schedule", + "type": "string", + "nullable": true + }, + "suspend": { + "title": "Suspend", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "title": "UpdateTriggerV1Request" + }, + "UpdateTriggerV1Response": { + "properties": { + "success": { + "type": "boolean", + "title": "Success" + } + }, + "type": "object", + "required": [ + "success" + ], + "title": "UpdateTriggerV1Response" + }, + "UpdateVLLMModelEndpointRequest": { + 
"properties": { + "quantize": { + "$ref": "#/components/schemas/Quantization", + "nullable": true + }, + "checkpoint_path": { + "title": "Checkpoint Path", + "type": "string", + "nullable": true + }, + "post_inference_hooks": { + "title": "Post Inference Hooks", + "items": { + "type": "string" + }, + "type": "array", + "nullable": true + }, + "cpus": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Cpus", + "nullable": true + }, + "gpus": { + "title": "Gpus", + "type": "integer", + "nullable": true + }, + "memory": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Memory", + "nullable": true + }, + "gpu_type": { + "$ref": "#/components/schemas/GpuType", + "nullable": true + }, + "storage": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "number" + } + ], + "title": "Storage", + "nullable": true + }, + "nodes_per_worker": { + "title": "Nodes Per Worker", + "type": "integer", + "nullable": true + }, + "optimize_costs": { + "title": "Optimize Costs", + "type": "boolean", + "nullable": true + }, + "prewarm": { + "title": "Prewarm", + "type": "boolean", + "nullable": true + }, + "high_priority": { + "title": "High Priority", + "type": "boolean", + "nullable": true + }, + "billing_tags": { + "title": "Billing Tags", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "default_callback_url": { + "title": "Default Callback Url", + "type": "string", + "nullable": true + }, + "default_callback_auth": { + "$ref": "#/components/schemas/CallbackAuth", + "nullable": true + }, + "public_inference": { + "title": "Public Inference", + "default": true, + "type": "boolean", + "nullable": true + }, + "chat_template_override": { + "title": "Chat Template Override", + "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "enable_startup_metrics": { + "title": "Enable Startup Metrics", + "description": "Enable startup metrics collection via OpenTelemetry. When enabled, emits traces and metrics for download, Python init, and vLLM init phases.", + "default": false, + "type": "boolean", + "nullable": true + }, + "model_name": { + "title": "Model Name", + "type": "string", + "nullable": true + }, + "source": { + "$ref": "#/components/schemas/LLMSource", + "nullable": true + }, + "inference_framework": { + "type": "string", + "title": "Inference Framework", + "default": "vllm", + "enum": [ + "vllm" + ] + }, + "inference_framework_image_tag": { + "title": "Inference Framework Image Tag", + "type": "string", + "nullable": true + }, + "num_shards": { + "title": "Num Shards", + "type": "integer", + "nullable": true + }, + "metadata": { + "title": "Metadata", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "force_bundle_recreation": { + "title": "Force Bundle Recreation", + "default": false, + "type": "boolean", + "nullable": true + }, + "min_workers": { + "title": "Min Workers", + "type": "integer", + "nullable": true + }, + "max_workers": { + "title": "Max Workers", + "type": "integer", + "nullable": true + }, + "per_worker": { + "title": "Per Worker", + "type": "integer", + "nullable": true + }, + "labels": { + "title": "Labels", + "additionalProperties": { + "type": "string" + }, + "type": "object", + "nullable": true + }, + "max_gpu_memory_utilization": { + "title": "Max Gpu Memory Utilization", + "description": "Maximum GPU memory utilization for the batch inference. Default to 90%. Deprecated in favor of specifying this in VLLMModelConfig", + "type": "number", + "nullable": true + }, + "attention_backend": { + "title": "Attention Backend", + "description": "Attention backend to use for vLLM. 
Default to None.", + "type": "string", + "nullable": true + }, + "max_model_len": { + "title": "Max Model Len", + "description": "Model context length, If unspecified, will be automatically derived from the model config", + "type": "integer", + "nullable": true + }, + "max_num_seqs": { + "title": "Max Num Seqs", + "description": "Maximum number of sequences per iteration", + "type": "integer", + "nullable": true + }, + "enforce_eager": { + "title": "Enforce Eager", + "description": "Always use eager-mode PyTorch. If False, will use eager mode and CUDA graph in hybrid for maximal performance and flexibility", + "type": "boolean", + "nullable": true + }, + "trust_remote_code": { + "title": "Trust Remote Code", + "description": "Whether to trust remote code from Hugging face hub. This is only applicable to models whose code is not supported natively by the transformers library (e.g. deepseek). Default to False.", + "default": false, + "type": "boolean", + "nullable": true + }, + "pipeline_parallel_size": { + "title": "Pipeline Parallel Size", + "description": "Number of pipeline stages. Default to None.", + "type": "integer", + "nullable": true + }, + "tensor_parallel_size": { + "title": "Tensor Parallel Size", + "description": "Number of tensor parallel replicas. Default to None.", + "type": "integer", + "nullable": true + }, + "quantization": { + "title": "Quantization", + "description": "Method used to quantize the weights. If None, we first check the `quantization_config` attribute in the model config file. If that is None, we assume the model weights are not quantized and use `dtype` to determine the data type of the weights.", + "type": "string", + "nullable": true + }, + "disable_log_requests": { + "title": "Disable Log Requests", + "description": "Disable logging requests. Default to None.", + "type": "boolean", + "nullable": true + }, + "chat_template": { + "title": "Chat Template", + "description": "A Jinja template to use for this endpoint. 
If not provided, will use the chat template from the checkpoint", + "type": "string", + "nullable": true + }, + "tool_call_parser": { + "title": "Tool Call Parser", + "description": "Tool call parser", + "type": "string", + "nullable": true + }, + "enable_auto_tool_choice": { + "title": "Enable Auto Tool Choice", + "description": "Enable auto tool choice", + "type": "boolean", + "nullable": true + }, + "load_format": { + "title": "Load Format", + "description": "The format of the model weights to load.\n\n* \"auto\" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.\n* \"pt\" will load the weights in the pytorch bin format.\n* \"safetensors\" will load the weights in the safetensors format.\n* \"npcache\" will load the weights in pytorch format and store a numpy cache to speed up the loading.\n* \"dummy\" will initialize the weights with random values, which is mainly for profiling.\n* \"tensorizer\" will load the weights using tensorizer from CoreWeave. See the Tensorize vLLM Model script in the Examples section for more information.\n* \"bitsandbytes\" will load the weights using bitsandbytes quantization.\n", + "type": "string", + "nullable": true + }, + "config_format": { + "title": "Config Format", + "description": "The config format which shall be loaded. Defaults to 'auto' which defaults to 'hf'.", + "type": "string", + "nullable": true + }, + "tokenizer_mode": { + "title": "Tokenizer Mode", + "description": "Tokenizer mode. 'auto' will use the fast tokenizer ifavailable, 'slow' will always use the slow tokenizer, and'mistral' will always use the tokenizer from `mistral_common`.", + "type": "string", + "nullable": true + }, + "limit_mm_per_prompt": { + "title": "Limit Mm Per Prompt", + "description": "Maximum number of data instances per modality per prompt. 
Only applicable for multimodal models.", + "type": "string", + "nullable": true + }, + "max_num_batched_tokens": { + "title": "Max Num Batched Tokens", + "description": "Maximum number of batched tokens per iteration", + "type": "integer", + "nullable": true + }, + "tokenizer": { + "title": "Tokenizer", + "description": "Name or path of the huggingface tokenizer to use.", + "type": "string", + "nullable": true + }, + "dtype": { + "title": "Dtype", + "description": "Data type for model weights and activations. The 'auto' option will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.", + "type": "string", + "nullable": true + }, + "seed": { + "title": "Seed", + "description": "Random seed for reproducibility.", + "type": "integer", + "nullable": true + }, + "revision": { + "title": "Revision", + "description": "The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "code_revision": { + "title": "Code Revision", + "description": "The specific revision to use for the model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "rope_scaling": { + "title": "Rope Scaling", + "description": "Dictionary containing the scaling configuration for the RoPE embeddings. When using this flag, don't update `max_position_embeddings` to the expected new maximum.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "tokenizer_revision": { + "title": "Tokenizer Revision", + "description": "The specific tokenizer version to use. It can be a branch name, a tag name, or a commit id. 
If unspecified, will use the default version.", + "type": "string", + "nullable": true + }, + "quantization_param_path": { + "title": "Quantization Param Path", + "description": "Path to JSON file containing scaling factors. Used to load KV cache scaling factors into the model when KV cache type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also be used to load activation and weight scaling factors when the model dtype is FP8_E4M3 on ROCm.", + "type": "string", + "nullable": true + }, + "max_seq_len_to_capture": { + "title": "Max Seq Len To Capture", + "description": "Maximum sequence len covered by CUDA graphs. When a sequence has context length larger than this, we fall back to eager mode. Additionally for encoder-decoder models, if the sequence length of the encoder input is larger than this, we fall back to the eager mode.", + "type": "integer", + "nullable": true + }, + "disable_sliding_window": { + "title": "Disable Sliding Window", + "description": "Whether to disable sliding window. If True, we will disable the sliding window functionality of the model. If the model does not support sliding window, this argument is ignored.", + "type": "boolean", + "nullable": true + }, + "skip_tokenizer_init": { + "title": "Skip Tokenizer Init", + "description": "If true, skip initialization of tokenizer and detokenizer.", + "type": "boolean", + "nullable": true + }, + "served_model_name": { + "title": "Served Model Name", + "description": "The model name used in metrics tag `model_name`, matches the model name exposed via the APIs. If multiple model names provided, the first name will be used. 
If not specified, the model name will be the same as `model`.", + "type": "string", + "nullable": true + }, + "override_neuron_config": { + "title": "Override Neuron Config", + "description": "Initialize non default neuron config or override default neuron config that are specific to Neuron devices, this argument will be used to configure the neuron config that can not be gathered from the vllm arguments.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "mm_processor_kwargs": { + "title": "Mm Processor Kwargs", + "description": "Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor.", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "block_size": { + "title": "Block Size", + "description": "Size of a cache block in number of tokens.", + "type": "integer", + "nullable": true + }, + "gpu_memory_utilization": { + "title": "Gpu Memory Utilization", + "description": "Fraction of GPU memory to use for the vLLM execution.", + "type": "number", + "nullable": true + }, + "swap_space": { + "title": "Swap Space", + "description": "Size of the CPU swap space per GPU (in GiB).", + "type": "number", + "nullable": true + }, + "cache_dtype": { + "title": "Cache Dtype", + "description": "Data type for kv cache storage.", + "type": "string", + "nullable": true + }, + "num_gpu_blocks_override": { + "title": "Num Gpu Blocks Override", + "description": "Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None.", + "type": "integer", + "nullable": true + }, + "enable_prefix_caching": { + "title": "Enable Prefix Caching", + "description": "Enables automatic prefix caching.", + "type": "boolean", + "nullable": true + } + }, + "type": "object", + "title": "UpdateVLLMModelEndpointRequest" + }, + "UploadFileResponse": { + "properties": { + "id": { + "type": "string", + "title": "Id", + "description": "ID of the uploaded file." 
+ } + }, + "type": "object", + "required": [ + "id" + ], + "title": "UploadFileResponse", + "description": "Response object for uploading a file." + }, + "UrlCitation": { + "properties": { + "end_index": { + "type": "integer", + "title": "End Index", + "description": "The index of the last character of the URL citation in the message." + }, + "start_index": { + "type": "integer", + "title": "Start Index", + "description": "The index of the first character of the URL citation in the message." + }, + "url": { + "type": "string", + "title": "Url", + "description": "The URL of the web resource." + }, + "title": { + "type": "string", + "title": "Title", + "description": "The title of the web resource." + } + }, + "type": "object", + "required": [ + "end_index", + "start_index", + "url", + "title" + ], + "title": "UrlCitation" + }, + "UserLocation": { + "properties": { + "type": { + "type": "string", + "title": "Type", + "description": "The type of location approximation. Always `approximate`.\n", + "enum": [ + "approximate" + ] + }, + "approximate": { + "$ref": "#/components/schemas/WebSearchLocation" + } + }, + "type": "object", + "required": [ + "type", + "approximate" + ], + "title": "UserLocation" + }, + "ValidationError": { + "properties": { + "loc": { + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + }, + "type": "array", + "title": "Location" + }, + "msg": { + "type": "string", + "title": "Message" + }, + "type": { + "type": "string", + "title": "Error Type" + } + }, + "type": "object", + "required": [ + "loc", + "msg", + "type" + ], + "title": "ValidationError" + }, + "VoiceIdsShared": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "string", + "enum": [ + "alloy", + "ash", + "ballad", + "coral", + "echo", + "fable", + "onyx", + "nova", + "sage", + "shimmer", + "verse" + ] + } + ], + "title": "VoiceIdsShared" + }, + "WebSearchContextSize": { + "type": "string", + "enum": [ + "low", + "medium", + "high" + ], + 
"title": "WebSearchContextSize", + "description": "High level guidance for the amount of context window space to use for the \nsearch. One of `low`, `medium`, or `high`. `medium` is the default.\n" + }, + "WebSearchLocation": { + "properties": { + "country": { + "title": "Country", + "description": "The two-letter \n[ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of the user,\ne.g. `US`.\n", + "type": "string", + "nullable": true + }, + "region": { + "title": "Region", + "description": "Free text input for the region of the user, e.g. `California`.\n", + "type": "string", + "nullable": true + }, + "city": { + "title": "City", + "description": "Free text input for the city of the user, e.g. `San Francisco`.\n", + "type": "string", + "nullable": true + }, + "timezone": { + "title": "Timezone", + "description": "The [IANA timezone](https://timeapi.io/documentation/iana-timezones) \nof the user, e.g. `America/Los_Angeles`.\n", + "type": "string", + "nullable": true + } + }, + "type": "object", + "title": "WebSearchLocation" + }, + "WebSearchOptions": { + "properties": { + "user_location": { + "description": "Approximate location parameters for the search.\n", + "$ref": "#/components/schemas/UserLocation", + "nullable": true + }, + "search_context_size": { + "$ref": "#/components/schemas/WebSearchContextSize", + "nullable": true + } + }, + "type": "object", + "title": "WebSearchOptions" + }, + "ZipArtifactFlavor": { + "properties": { + "requirements": { + "items": { + "type": "string" + }, + "type": "array", + "title": "Requirements" + }, + "framework": { + "oneOf": [ + { + "$ref": "#/components/schemas/PytorchFramework" + }, + { + "$ref": "#/components/schemas/TensorflowFramework" + }, + { + "$ref": "#/components/schemas/CustomFramework" + } + ], + "title": "Framework", + "discriminator": { + "propertyName": "framework_type", + "mapping": { + "custom_base_image": "#/components/schemas/CustomFramework", + "pytorch": "#/components/schemas/PytorchFramework", 
+ "tensorflow": "#/components/schemas/TensorflowFramework" + } + } + }, + "app_config": { + "title": "App Config", + "additionalProperties": true, + "type": "object", + "nullable": true + }, + "location": { + "type": "string", + "title": "Location" + }, + "flavor": { + "type": "string", + "title": "Flavor", + "enum": [ + "zip_artifact" + ] + }, + "load_predict_fn_module_path": { + "type": "string", + "title": "Load Predict Fn Module Path" + }, + "load_model_fn_module_path": { + "type": "string", + "title": "Load Model Fn Module Path" + } + }, + "type": "object", + "required": [ + "requirements", + "framework", + "location", + "flavor", + "load_predict_fn_module_path", + "load_model_fn_module_path" + ], + "title": "ZipArtifactFlavor", + "description": "This is the entity-layer class for the Model Bundle flavor of a zip artifact." + }, + "CreateLLMModelEndpointV1Request": { + "oneOf": [ + { + "$ref": "#/components/schemas/CreateVLLMModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/CreateSGLangModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/CreateDeepSpeedModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/CreateTextGenerationInferenceModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/CreateLightLLMModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/CreateTensorRTLLMModelEndpointRequest" + } + ], + "title": "RootModel[Annotated[Union[Annotated[CreateVLLMModelEndpointRequest, Tag], Annotated[CreateSGLangModelEndpointRequest, Tag], Annotated[CreateDeepSpeedModelEndpointRequest, Tag], Annotated[CreateTextGenerationInferenceModelEndpointRequest, Tag], Annotated[CreateLightLLMModelEndpointRequest, Tag], Annotated[CreateTensorRTLLMModelEndpointRequest, Tag]], Discriminator]]" + }, + "UpdateLLMModelEndpointV1Request": { + "oneOf": [ + { + "$ref": "#/components/schemas/UpdateVLLMModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/UpdateSGLangModelEndpointRequest" + }, + { + "$ref": 
"#/components/schemas/UpdateDeepSpeedModelEndpointRequest" + }, + { + "$ref": "#/components/schemas/UpdateTextGenerationInferenceModelEndpointRequest" + } + ], + "title": "RootModel[Annotated[Union[Annotated[UpdateVLLMModelEndpointRequest, Tag], Annotated[UpdateSGLangModelEndpointRequest, Tag], Annotated[UpdateDeepSpeedModelEndpointRequest, Tag], Annotated[UpdateTextGenerationInferenceModelEndpointRequest, Tag]], Discriminator]]" + } + }, + "securitySchemes": { + "HTTPBasic": { + "type": "http", + "scheme": "basic" + }, + "OAuth2PasswordBearer": { + "type": "oauth2", + "flows": { + "password": { + "scopes": {}, + "tokenUrl": "token" + } + } + } + } + } +} \ No newline at end of file diff --git a/openapitools.json b/openapitools.json new file mode 100644 index 00000000..b99f7e67 --- /dev/null +++ b/openapitools.json @@ -0,0 +1,22 @@ +{ + "$schema": "./node_modules/@openapitools/openapi-generator-cli/config.schema.json", + "spaces": 2, + "generator-cli": { + "version": "6.4.0", + "generators": { + "python": { + "generatorName": "python", + "output": "#{cwd}", + "glob": "openapi.json", + "packageName": "launch.api_client", + "additionalProperties": { + "packageVersion": "1.1.2" + }, + "globalProperty": { + "skipFormModel": false + }, + "skipValidateSpec": true + } + } + } +} diff --git a/pyproject.toml b/pyproject.toml index ec2c0b02..2a4a6d2c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,7 @@ exclude = ''' | buck-out | build | dist + | launch/api_client # generated code )/ ) ''' @@ -27,7 +28,7 @@ exclude = [ [tool.poetry] name = "scale-launch" -version = "1.1.0" +version = "0.4.0" description = "The official Python client library for Launch, the Data Platform for AI" authors = ["Your Name "] readme = "README.md" diff --git a/test/test_paths/__init__.py b/test/test_paths/__init__.py new file mode 100644 index 00000000..02b36375 --- /dev/null +++ b/test/test_paths/__init__.py @@ -0,0 +1,57 @@ +import json +import typing + +import urllib3 +from 
urllib3._collections import HTTPHeaderDict + + +class ApiTestMixin: + json_content_type = "application/json" + user_agent = "OpenAPI-Generator/1.0.0/python" + + @classmethod + def assert_pool_manager_request_called_with( + cls, + mock_request, + url: str, + method: str = "POST", + body: typing.Optional[bytes] = None, + content_type: typing.Optional[str] = None, + accept_content_type: typing.Optional[str] = None, + stream: bool = False, + ): + headers = {"User-Agent": cls.user_agent} + if accept_content_type: + headers["Accept"] = accept_content_type + if content_type: + headers["Content-Type"] = content_type + kwargs = dict( + headers=HTTPHeaderDict(headers), + preload_content=not stream, + timeout=None, + ) + if content_type and method != "GET": + kwargs["body"] = body + mock_request.assert_called_with(method, url, **kwargs) + + @staticmethod + def headers_for_content_type(content_type: str) -> typing.Dict[str, str]: + return {"content-type": content_type} + + @classmethod + def response( + cls, + body: typing.Union[str, bytes], + status: int = 200, + content_type: str = json_content_type, + headers: typing.Optional[typing.Dict[str, str]] = None, + preload_content: bool = True, + ) -> urllib3.HTTPResponse: + if headers is None: + headers = {} + headers.update(cls.headers_for_content_type(content_type)) + return urllib3.HTTPResponse(body, headers=headers, status=status, preload_content=preload_content) + + @staticmethod + def json_bytes(in_data: typing.Any) -> bytes: + return json.dumps(in_data, separators=(",", ":"), ensure_ascii=False).encode("utf-8") diff --git a/test/test_paths/test_healthcheck/__init__.py b/test/test_paths/test_healthcheck/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_healthcheck/test_get.py b/test/test_paths/test_healthcheck/test_get.py new file mode 100644 index 00000000..369e5a38 --- /dev/null +++ b/test/test_paths/test_healthcheck/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + 
Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.healthcheck import get # noqa: E501 + +from .. import ApiTestMixin + + +class TestHealthcheck(ApiTestMixin, unittest.TestCase): + """ + Healthcheck unit test stubs + Healthcheck # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_healthz/__init__.py b/test/test_paths/test_healthz/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_healthz/test_get.py b/test/test_paths/test_healthz/test_get.py new file mode 100644 index 00000000..a4be4c09 --- /dev/null +++ b/test/test_paths/test_healthz/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.healthz import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestHealthz(ApiTestMixin, unittest.TestCase): + """ + Healthz unit test stubs + Healthcheck # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_readyz/__init__.py b/test/test_paths/test_readyz/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_readyz/test_get.py b/test/test_paths/test_readyz/test_get.py new file mode 100644 index 00000000..8876d438 --- /dev/null +++ b/test/test_paths/test_readyz/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.readyz import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestReadyz(ApiTestMixin, unittest.TestCase): + """ + Readyz unit test stubs + Healthcheck # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_async_tasks/__init__.py b/test/test_paths/test_v1_async_tasks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_async_tasks/test_post.py b/test/test_paths/test_v1_async_tasks/test_post.py new file mode 100644 index 00000000..af187d92 --- /dev/null +++ b/test/test_paths/test_v1_async_tasks/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_async_tasks import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1AsyncTasks(ApiTestMixin, unittest.TestCase): + """ + V1AsyncTasks unit test stubs + Create Async Inference Task # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_async_tasks_task_id/__init__.py b/test/test_paths/test_v1_async_tasks_task_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_async_tasks_task_id/test_get.py b/test/test_paths/test_v1_async_tasks_task_id/test_get.py new file mode 100644 index 00000000..99311617 --- /dev/null +++ b/test/test_paths/test_v1_async_tasks_task_id/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_async_tasks_task_id import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1AsyncTasksTaskId(ApiTestMixin, unittest.TestCase): + """ + V1AsyncTasksTaskId unit test stubs + Get Async Inference Task # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_batch_jobs/__init__.py b/test/test_paths/test_v1_batch_jobs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_batch_jobs/test_post.py b/test/test_paths/test_v1_batch_jobs/test_post.py new file mode 100644 index 00000000..5edcd2ab --- /dev/null +++ b/test/test_paths/test_v1_batch_jobs/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_batch_jobs import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1BatchJobs(ApiTestMixin, unittest.TestCase): + """ + V1BatchJobs unit test stubs + Create Batch Job # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_batch_jobs_batch_job_id/__init__.py b/test/test_paths/test_v1_batch_jobs_batch_job_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_batch_jobs_batch_job_id/test_get.py b/test/test_paths/test_v1_batch_jobs_batch_job_id/test_get.py new file mode 100644 index 00000000..2ba46baa --- /dev/null +++ b/test/test_paths/test_v1_batch_jobs_batch_job_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_batch_jobs_batch_job_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1BatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): + """ + V1BatchJobsBatchJobId unit test stubs + Get Batch Job # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_batch_jobs_batch_job_id/test_put.py b/test/test_paths/test_v1_batch_jobs_batch_job_id/test_put.py new file mode 100644 index 00000000..c07ed01b --- /dev/null +++ b/test/test_paths/test_v1_batch_jobs_batch_job_id/test_put.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_batch_jobs_batch_job_id import ( # noqa: E501 + put, +) + +from .. 
import ApiTestMixin + + +class TestV1BatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): + """ + V1BatchJobsBatchJobId unit test stubs + Update Batch Job # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles/__init__.py b/test/test_paths/test_v1_docker_image_batch_job_bundles/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles/test_get.py b/test/test_paths/test_v1_docker_image_batch_job_bundles/test_get.py new file mode 100644 index 00000000..2afaed5f --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_job_bundles/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_job_bundles import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobBundles(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobBundles unit test stubs + List Docker Image Batch Job Model Bundles # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles/test_post.py b/test/test_paths/test_v1_docker_image_batch_job_bundles/test_post.py new file mode 100644 index 00000000..a93143e9 --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_job_bundles/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_job_bundles import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobBundles(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobBundles unit test stubs + Create Docker Image Batch Job Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/test_get.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/test_get.py new file mode 100644 index 00000000..fd33431b --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_job_bundles_docker_image_batch_job_bundle_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobBundlesDockerImageBatchJobBundleId(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobBundlesDockerImageBatchJobBundleId unit test stubs + Get Docker Image Batch Job Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/__init__.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/test_get.py b/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/test_get.py new file mode 100644 index 00000000..e82be891 --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_job_bundles_latest/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_job_bundles_latest import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobBundlesLatest(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobBundlesLatest unit test stubs + Get Latest Docker Image Batch Job Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs/__init__.py b/test/test_paths/test_v1_docker_image_batch_jobs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_docker_image_batch_jobs/test_get.py b/test/test_paths/test_v1_docker_image_batch_jobs/test_get.py new file mode 100644 index 00000000..1ac9f998 --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_jobs/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_jobs import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobs(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobs unit test stubs + List Docker Image Batch Jobs # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs/test_post.py b/test/test_paths/test_v1_docker_image_batch_jobs/test_post.py new file mode 100644 index 00000000..86b27b1a --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_jobs/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_jobs import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobs(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobs unit test stubs + Create Docker Image Batch Job # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/__init__.py b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_get.py b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_get.py new file mode 100644 index 00000000..1e507744 --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobsBatchJobId unit test stubs + Get Docker Image Batch Job # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_put.py b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_put.py new file mode 100644 index 00000000..3e08b35d --- /dev/null +++ b/test/test_paths/test_v1_docker_image_batch_jobs_batch_job_id/test_put.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_docker_image_batch_jobs_batch_job_id import ( # noqa: E501 + put, +) + +from .. 
import ApiTestMixin + + +class TestV1DockerImageBatchJobsBatchJobId(ApiTestMixin, unittest.TestCase): + """ + V1DockerImageBatchJobsBatchJobId unit test stubs + Update Docker Image Batch Job # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_files/__init__.py b/test/test_paths/test_v1_files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_files/test_get.py b/test/test_paths/test_v1_files/test_get.py new file mode 100644 index 00000000..5232da4a --- /dev/null +++ b/test/test_paths/test_v1_files/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_files import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1Files(ApiTestMixin, unittest.TestCase): + """ + V1Files unit test stubs + List Files # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_files/test_post.py b/test/test_paths/test_v1_files/test_post.py new file mode 100644 index 00000000..32a1f002 --- /dev/null +++ b/test/test_paths/test_v1_files/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_files import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1Files(ApiTestMixin, unittest.TestCase): + """ + V1Files unit test stubs + Upload File # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_files_file_id/__init__.py b/test/test_paths/test_v1_files_file_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_files_file_id/test_delete.py b/test/test_paths/test_v1_files_file_id/test_delete.py new file mode 100644 index 00000000..f87b940f --- /dev/null +++ b/test/test_paths/test_v1_files_file_id/test_delete.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_files_file_id import delete # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1FilesFileId(ApiTestMixin, unittest.TestCase): + """ + V1FilesFileId unit test stubs + Delete File # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_files_file_id/test_get.py b/test/test_paths/test_v1_files_file_id/test_get.py new file mode 100644 index 00000000..63e5f30e --- /dev/null +++ b/test/test_paths/test_v1_files_file_id/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_files_file_id import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1FilesFileId(ApiTestMixin, unittest.TestCase): + """ + V1FilesFileId unit test stubs + Get File # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_files_file_id_content/__init__.py b/test/test_paths/test_v1_files_file_id_content/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_files_file_id_content/test_get.py b/test/test_paths/test_v1_files_file_id_content/test_get.py new file mode 100644 index 00000000..530b06cb --- /dev/null +++ b/test/test_paths/test_v1_files_file_id_content/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_files_file_id_content import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1FilesFileIdContent(ApiTestMixin, unittest.TestCase): + """ + V1FilesFileIdContent unit test stubs + Get File Content # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_batch_completions/__init__.py b/test/test_paths/test_v1_llm_batch_completions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_batch_completions/test_post.py b/test/test_paths/test_v1_llm_batch_completions/test_post.py new file mode 100644 index 00000000..cd9ae6e0 --- /dev/null +++ b/test/test_paths/test_v1_llm_batch_completions/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_batch_completions import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1LlmBatchCompletions(ApiTestMixin, unittest.TestCase): + """ + V1LlmBatchCompletions unit test stubs + Create Batch Completions # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_completions_stream/__init__.py b/test/test_paths/test_v1_llm_completions_stream/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_completions_stream/test_post.py b/test/test_paths/test_v1_llm_completions_stream/test_post.py new file mode 100644 index 00000000..a1416702 --- /dev/null +++ b/test/test_paths/test_v1_llm_completions_stream/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_completions_stream import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmCompletionsStream(ApiTestMixin, unittest.TestCase): + """ + V1LlmCompletionsStream unit test stubs + Create Completion Stream Task # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_completions_sync/__init__.py b/test/test_paths/test_v1_llm_completions_sync/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_completions_sync/test_post.py b/test/test_paths/test_v1_llm_completions_sync/test_post.py new file mode 100644 index 00000000..2b04e8ca --- /dev/null +++ b/test/test_paths/test_v1_llm_completions_sync/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_completions_sync import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1LlmCompletionsSync(ApiTestMixin, unittest.TestCase): + """ + V1LlmCompletionsSync unit test stubs + Create Completion Sync Task # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes/__init__.py b/test/test_paths/test_v1_llm_fine_tunes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_fine_tunes/test_get.py b/test/test_paths/test_v1_llm_fine_tunes/test_get.py new file mode 100644 index 00000000..eab298e4 --- /dev/null +++ b/test/test_paths/test_v1_llm_fine_tunes/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_fine_tunes import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1LlmFineTunes(ApiTestMixin, unittest.TestCase): + """ + V1LlmFineTunes unit test stubs + List Fine Tunes # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes/test_post.py b/test/test_paths/test_v1_llm_fine_tunes/test_post.py new file mode 100644 index 00000000..f0cc0e07 --- /dev/null +++ b/test/test_paths/test_v1_llm_fine_tunes/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_fine_tunes import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1LlmFineTunes(ApiTestMixin, unittest.TestCase): + """ + V1LlmFineTunes unit test stubs + Create Fine Tune # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/__init__.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/test_get.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/test_get.py new file mode 100644 index 00000000..4d45ad16 --- /dev/null +++ b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmFineTunesFineTuneId(ApiTestMixin, unittest.TestCase): + """ + V1LlmFineTunesFineTuneId unit test stubs + Get Fine Tune # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/test_put.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/test_put.py new file mode 100644 index 00000000..0c962326 --- /dev/null +++ b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_cancel/test_put.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_cancel import ( # noqa: E501 + put, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmFineTunesFineTuneIdCancel(ApiTestMixin, unittest.TestCase): + """ + V1LlmFineTunesFineTuneIdCancel unit test stubs + Cancel Fine Tune # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/__init__.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/test_get.py b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/test_get.py new file mode 100644 index 00000000..d5470fef --- /dev/null +++ b/test/test_paths/test_v1_llm_fine_tunes_fine_tune_id_events/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_fine_tunes_fine_tune_id_events import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmFineTunesFineTuneIdEvents(ApiTestMixin, unittest.TestCase): + """ + V1LlmFineTunesFineTuneIdEvents unit test stubs + Get Fine Tune Events # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints/__init__.py b/test/test_paths/test_v1_llm_model_endpoints/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_model_endpoints/test_get.py b/test/test_paths/test_v1_llm_model_endpoints/test_get.py new file mode 100644 index 00000000..c50c4555 --- /dev/null +++ b/test/test_paths/test_v1_llm_model_endpoints/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_model_endpoints import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1LlmModelEndpoints(ApiTestMixin, unittest.TestCase): + """ + V1LlmModelEndpoints unit test stubs + List Model Endpoints # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints/test_post.py b/test/test_paths/test_v1_llm_model_endpoints/test_post.py new file mode 100644 index 00000000..3981e312 --- /dev/null +++ b/test/test_paths/test_v1_llm_model_endpoints/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_model_endpoints import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1LlmModelEndpoints(ApiTestMixin, unittest.TestCase): + """ + V1LlmModelEndpoints unit test stubs + Create Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_download/__init__.py b/test/test_paths/test_v1_llm_model_endpoints_download/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_model_endpoints_download/test_post.py b/test/test_paths/test_v1_llm_model_endpoints_download/test_post.py new file mode 100644 index 00000000..f1fb9111 --- /dev/null +++ b/test/test_paths/test_v1_llm_model_endpoints_download/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_model_endpoints_download import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmModelEndpointsDownload(ApiTestMixin, unittest.TestCase): + """ + V1LlmModelEndpointsDownload unit test stubs + Download Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/__init__.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_delete.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_delete.py new file mode 100644 index 00000000..455d45e9 --- /dev/null +++ b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_delete.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import ( # noqa: E501 + delete, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmModelEndpointsModelEndpointName(ApiTestMixin, unittest.TestCase): + """ + V1LlmModelEndpointsModelEndpointName unit test stubs + Delete Llm Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_get.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_get.py new file mode 100644 index 00000000..00c8b86e --- /dev/null +++ b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmModelEndpointsModelEndpointName(ApiTestMixin, unittest.TestCase): + """ + V1LlmModelEndpointsModelEndpointName unit test stubs + Get Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_put.py b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_put.py new file mode 100644 index 00000000..0b90da8e --- /dev/null +++ b/test/test_paths/test_v1_llm_model_endpoints_model_endpoint_name/test_put.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_llm_model_endpoints_model_endpoint_name import ( # noqa: E501 + put, +) + +from .. 
import ApiTestMixin + + +class TestV1LlmModelEndpointsModelEndpointName(ApiTestMixin, unittest.TestCase): + """ + V1LlmModelEndpointsModelEndpointName unit test stubs + Update Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_bundles/__init__.py b/test/test_paths/test_v1_model_bundles/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_bundles/test_get.py b/test/test_paths/test_v1_model_bundles/test_get.py new file mode 100644 index 00000000..9a3f702d --- /dev/null +++ b/test/test_paths/test_v1_model_bundles/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_bundles import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1ModelBundles(ApiTestMixin, unittest.TestCase): + """ + V1ModelBundles unit test stubs + List Model Bundles # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_bundles/test_post.py b/test/test_paths/test_v1_model_bundles/test_post.py new file mode 100644 index 00000000..a57ac049 --- /dev/null +++ b/test/test_paths/test_v1_model_bundles/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_bundles import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1ModelBundles(ApiTestMixin, unittest.TestCase): + """ + V1ModelBundles unit test stubs + Create Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_bundles_clone_with_changes/__init__.py b/test/test_paths/test_v1_model_bundles_clone_with_changes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_bundles_clone_with_changes/test_post.py b/test/test_paths/test_v1_model_bundles_clone_with_changes/test_post.py new file mode 100644 index 00000000..94e37019 --- /dev/null +++ b/test/test_paths/test_v1_model_bundles_clone_with_changes/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_bundles_clone_with_changes import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelBundlesCloneWithChanges(ApiTestMixin, unittest.TestCase): + """ + V1ModelBundlesCloneWithChanges unit test stubs + Clone Model Bundle With Changes # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_bundles_latest/__init__.py b/test/test_paths/test_v1_model_bundles_latest/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_bundles_latest/test_get.py b/test/test_paths/test_v1_model_bundles_latest/test_get.py new file mode 100644 index 00000000..cc964745 --- /dev/null +++ b/test/test_paths/test_v1_model_bundles_latest/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_bundles_latest import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1ModelBundlesLatest(ApiTestMixin, unittest.TestCase): + """ + V1ModelBundlesLatest unit test stubs + Get Latest Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_bundles_model_bundle_id/__init__.py b/test/test_paths/test_v1_model_bundles_model_bundle_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_bundles_model_bundle_id/test_get.py b/test/test_paths/test_v1_model_bundles_model_bundle_id/test_get.py new file mode 100644 index 00000000..602fe975 --- /dev/null +++ b/test/test_paths/test_v1_model_bundles_model_bundle_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_bundles_model_bundle_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelBundlesModelBundleId(ApiTestMixin, unittest.TestCase): + """ + V1ModelBundlesModelBundleId unit test stubs + Get Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints/__init__.py b/test/test_paths/test_v1_model_endpoints/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_endpoints/test_get.py b/test/test_paths/test_v1_model_endpoints/test_get.py new file mode 100644 index 00000000..c000bf21 --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpoints(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpoints unit test stubs + List Model Endpoints # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints/test_post.py b/test/test_paths/test_v1_model_endpoints/test_post.py new file mode 100644 index 00000000..880fdbde --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpoints(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpoints unit test stubs + Create Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_api/__init__.py b/test/test_paths/test_v1_model_endpoints_api/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_endpoints_api/test_get.py b/test/test_paths/test_v1_model_endpoints_api/test_get.py new file mode 100644 index 00000000..d88851d1 --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints_api/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints_api import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpointsApi(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpointsApi unit test stubs + Get Model Endpoints Api # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/__init__.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_delete.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_delete.py new file mode 100644 index 00000000..5cbb222b --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_delete.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import ( # noqa: E501 + delete, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpointsModelEndpointId(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpointsModelEndpointId unit test stubs + Delete Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_get.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_get.py new file mode 100644 index 00000000..9e6ff427 --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpointsModelEndpointId(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpointsModelEndpointId unit test stubs + Get Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_put.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_put.py new file mode 100644 index 00000000..4be6978d --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints_model_endpoint_id/test_put.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints_model_endpoint_id import ( # noqa: E501 + put, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpointsModelEndpointId(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpointsModelEndpointId unit test stubs + Update Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/__init__.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/test_post.py b/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/test_post.py new file mode 100644 index 00000000..79790def --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints_model_endpoint_id_restart/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints_model_endpoint_id_restart import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpointsModelEndpointIdRestart(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpointsModelEndpointIdRestart unit test stubs + Restart Model Endpoint # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_model_endpoints_schema_json/__init__.py b/test/test_paths/test_v1_model_endpoints_schema_json/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_model_endpoints_schema_json/test_get.py b/test/test_paths/test_v1_model_endpoints_schema_json/test_get.py new file mode 100644 index 00000000..09f6fb0c --- /dev/null +++ b/test/test_paths/test_v1_model_endpoints_schema_json/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_model_endpoints_schema_json import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV1ModelEndpointsSchemaJson(ApiTestMixin, unittest.TestCase): + """ + V1ModelEndpointsSchemaJson unit test stubs + Get Model Endpoints Schema # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_streaming_tasks/__init__.py b/test/test_paths/test_v1_streaming_tasks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_streaming_tasks/test_post.py b/test/test_paths/test_v1_streaming_tasks/test_post.py new file mode 100644 index 00000000..89394b48 --- /dev/null +++ b/test/test_paths/test_v1_streaming_tasks/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_streaming_tasks import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1StreamingTasks(ApiTestMixin, unittest.TestCase): + """ + V1StreamingTasks unit test stubs + Create Streaming Inference Task # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_sync_tasks/__init__.py b/test/test_paths/test_v1_sync_tasks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_sync_tasks/test_post.py b/test/test_paths/test_v1_sync_tasks/test_post.py new file mode 100644 index 00000000..09bd6339 --- /dev/null +++ b/test/test_paths/test_v1_sync_tasks/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_sync_tasks import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1SyncTasks(ApiTestMixin, unittest.TestCase): + """ + V1SyncTasks unit test stubs + Create Sync Inference Task # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_triggers/__init__.py b/test/test_paths/test_v1_triggers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_triggers/test_get.py b/test/test_paths/test_v1_triggers/test_get.py new file mode 100644 index 00000000..a45b402d --- /dev/null +++ b/test/test_paths/test_v1_triggers/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_triggers import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1Triggers(ApiTestMixin, unittest.TestCase): + """ + V1Triggers unit test stubs + List Triggers # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_triggers/test_post.py b/test/test_paths/test_v1_triggers/test_post.py new file mode 100644 index 00000000..a27a778a --- /dev/null +++ b/test/test_paths/test_v1_triggers/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_triggers import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1Triggers(ApiTestMixin, unittest.TestCase): + """ + V1Triggers unit test stubs + Create Trigger # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_triggers_trigger_id/__init__.py b/test/test_paths/test_v1_triggers_trigger_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v1_triggers_trigger_id/test_delete.py b/test/test_paths/test_v1_triggers_trigger_id/test_delete.py new file mode 100644 index 00000000..48efdb01 --- /dev/null +++ b/test/test_paths/test_v1_triggers_trigger_id/test_delete.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_triggers_trigger_id import delete # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1TriggersTriggerId(ApiTestMixin, unittest.TestCase): + """ + V1TriggersTriggerId unit test stubs + Delete Trigger # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = delete.ApiFordelete(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_triggers_trigger_id/test_get.py b/test/test_paths/test_v1_triggers_trigger_id/test_get.py new file mode 100644 index 00000000..2f8a87ed --- /dev/null +++ b/test/test_paths/test_v1_triggers_trigger_id/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_triggers_trigger_id import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1TriggersTriggerId(ApiTestMixin, unittest.TestCase): + """ + V1TriggersTriggerId unit test stubs + Get Trigger # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v1_triggers_trigger_id/test_put.py b/test/test_paths/test_v1_triggers_trigger_id/test_put.py new file mode 100644 index 00000000..65d21839 --- /dev/null +++ b/test/test_paths/test_v1_triggers_trigger_id/test_put.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v1_triggers_trigger_id import put # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV1TriggersTriggerId(ApiTestMixin, unittest.TestCase): + """ + V1TriggersTriggerId unit test stubs + Update Trigger # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = put.ApiForput(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_batch_completions/__init__.py b/test/test_paths/test_v2_batch_completions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_batch_completions/test_post.py b/test/test_paths/test_v2_batch_completions/test_post.py new file mode 100644 index 00000000..5c808bab --- /dev/null +++ b/test/test_paths/test_v2_batch_completions/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_batch_completions import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV2BatchCompletions(ApiTestMixin, unittest.TestCase): + """ + V2BatchCompletions unit test stubs + Batch Completions # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id/__init__.py b/test/test_paths/test_v2_batch_completions_batch_completion_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id/test_get.py b/test/test_paths/test_v2_batch_completions_batch_completion_id/test_get.py new file mode 100644 index 00000000..f95ae8dd --- /dev/null +++ b/test/test_paths/test_v2_batch_completions_batch_completion_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_batch_completions_batch_completion_id import ( # noqa: E501 + get, +) + +from .. 
import ApiTestMixin + + +class TestV2BatchCompletionsBatchCompletionId(ApiTestMixin, unittest.TestCase): + """ + V2BatchCompletionsBatchCompletionId unit test stubs + Get Batch Completion # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id/test_post.py b/test/test_paths/test_v2_batch_completions_batch_completion_id/test_post.py new file mode 100644 index 00000000..880f6f2d --- /dev/null +++ b/test/test_paths/test_v2_batch_completions_batch_completion_id/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_batch_completions_batch_completion_id import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV2BatchCompletionsBatchCompletionId(ApiTestMixin, unittest.TestCase): + """ + V2BatchCompletionsBatchCompletionId unit test stubs + Update Batch Completion # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/__init__.py b/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/test_post.py b/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/test_post.py new file mode 100644 index 00000000..377c68ed --- /dev/null +++ b/test/test_paths/test_v2_batch_completions_batch_completion_id_actions_cancel/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_batch_completions_batch_completion_id_actions_cancel import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV2BatchCompletionsBatchCompletionIdActionsCancel(ApiTestMixin, unittest.TestCase): + """ + V2BatchCompletionsBatchCompletionIdActionsCancel unit test stubs + Cancel Batch Completion # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_chat_completions/__init__.py b/test/test_paths/test_v2_chat_completions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_chat_completions/test_post.py b/test/test_paths/test_v2_chat_completions/test_post.py new file mode 100644 index 00000000..56741b47 --- /dev/null +++ b/test/test_paths/test_v2_chat_completions/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_chat_completions import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV2ChatCompletions(ApiTestMixin, unittest.TestCase): + """ + V2ChatCompletions unit test stubs + Chat Completion # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_completions/__init__.py b/test/test_paths/test_v2_completions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_completions/test_post.py b/test/test_paths/test_v2_completions/test_post.py new file mode 100644 index 00000000..de8a3262 --- /dev/null +++ b/test/test_paths/test_v2_completions/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_completions import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV2Completions(ApiTestMixin, unittest.TestCase): + """ + V2Completions unit test stubs + Completion # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_model_bundles/__init__.py b/test/test_paths/test_v2_model_bundles/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_model_bundles/test_get.py b/test/test_paths/test_v2_model_bundles/test_get.py new file mode 100644 index 00000000..34a4f0a9 --- /dev/null +++ b/test/test_paths/test_v2_model_bundles/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_model_bundles import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV2ModelBundles(ApiTestMixin, unittest.TestCase): + """ + V2ModelBundles unit test stubs + List Model Bundles # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_model_bundles/test_post.py b/test/test_paths/test_v2_model_bundles/test_post.py new file mode 100644 index 00000000..56804d0c --- /dev/null +++ b/test/test_paths/test_v2_model_bundles/test_post.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_model_bundles import post # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV2ModelBundles(ApiTestMixin, unittest.TestCase): + """ + V2ModelBundles unit test stubs + Create Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_model_bundles_clone_with_changes/__init__.py b/test/test_paths/test_v2_model_bundles_clone_with_changes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_model_bundles_clone_with_changes/test_post.py b/test/test_paths/test_v2_model_bundles_clone_with_changes/test_post.py new file mode 100644 index 00000000..f48288c0 --- /dev/null +++ b/test/test_paths/test_v2_model_bundles_clone_with_changes/test_post.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_model_bundles_clone_with_changes import ( # noqa: E501 + post, +) + +from .. 
import ApiTestMixin + + +class TestV2ModelBundlesCloneWithChanges(ApiTestMixin, unittest.TestCase): + """ + V2ModelBundlesCloneWithChanges unit test stubs + Clone Model Bundle With Changes # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = post.ApiForpost(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_model_bundles_latest/__init__.py b/test/test_paths/test_v2_model_bundles_latest/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_model_bundles_latest/test_get.py b/test/test_paths/test_v2_model_bundles_latest/test_get.py new file mode 100644 index 00000000..566fa232 --- /dev/null +++ b/test/test_paths/test_v2_model_bundles_latest/test_get.py @@ -0,0 +1,40 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_model_bundles_latest import get # noqa: E501 + +from .. 
import ApiTestMixin + + +class TestV2ModelBundlesLatest(ApiTestMixin, unittest.TestCase): + """ + V2ModelBundlesLatest unit test stubs + Get Latest Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main() diff --git a/test/test_paths/test_v2_model_bundles_model_bundle_id/__init__.py b/test/test_paths/test_v2_model_bundles_model_bundle_id/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/test_paths/test_v2_model_bundles_model_bundle_id/test_get.py b/test/test_paths/test_v2_model_bundles_model_bundle_id/test_get.py new file mode 100644 index 00000000..8383ba28 --- /dev/null +++ b/test/test_paths/test_v2_model_bundles_model_bundle_id/test_get.py @@ -0,0 +1,42 @@ +# coding: utf-8 + +""" + + + Generated by: https://openapi-generator.tech +""" + +import unittest +from unittest.mock import patch + +import urllib3 + +import launch.api_client +from launch.api_client import api_client, configuration, schemas +from launch.api_client.paths.v2_model_bundles_model_bundle_id import ( # noqa: E501 + get, +) + +from .. import ApiTestMixin + + +class TestV2ModelBundlesModelBundleId(ApiTestMixin, unittest.TestCase): + """ + V2ModelBundlesModelBundleId unit test stubs + Get Model Bundle # noqa: E501 + """ + + _configuration = configuration.Configuration() + + def setUp(self): + used_api_client = api_client.ApiClient(configuration=self._configuration) + self.api = get.ApiForget(api_client=used_api_client) # noqa: E501 + + def tearDown(self): + pass + + response_status = 200 + + +if __name__ == "__main__": + unittest.main()