From 18bd942f9463ce92396bedd5aab07a2fbc381fbe Mon Sep 17 00:00:00 2001
From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com>
Date: Tue, 1 Oct 2024 23:54:06 +0300
Subject: [PATCH] ci: regenerated with OpenAPI Doc, Speakeasy CLI 1.405.6 (#15)

Co-authored-by: speakeasybot
---
 .speakeasy/gen.lock                           |  31 ++-
 .speakeasy/gen.yaml                           |   4 +-
 .speakeasy/workflow.lock                      |  12 +-
 README.md                                     |  20 +-
 RELEASES.md                                   |  12 +-
 codeSamples.yaml                              |  19 ++
 docs/models/components/bodygenllm.md          |  14 ++
 docs/models/components/llmresponse.md         |   9 +
 docs/models/errors/httpvalidationerror.md     |   2 -
 docs/models/operations/genllmresponse.md      |   9 +
 docs/sdks/generate/README.md                  |  84 +++++---
 pyproject.toml                                |   2 +-
 src/livepeer_ai/generate.py                   | 186 ++++++++++++++++++
 src/livepeer_ai/models/components/__init__.py |   6 +
 .../models/components/body_genllm.py          |  33 ++++
 .../models/components/llmresponse.py          |  16 ++
 .../models/errors/httpvalidationerror.py      |   2 -
 src/livepeer_ai/models/operations/__init__.py |   3 +
 src/livepeer_ai/models/operations/genllm.py   |  26 +++
 src/livepeer_ai/sdkconfiguration.py           |   8 +-
 20 files changed, 446 insertions(+), 52 deletions(-)
 create mode 100644 docs/models/components/bodygenllm.md
 create mode 100644 docs/models/components/llmresponse.md
 create mode 100644 docs/models/operations/genllmresponse.md
 create mode 100644 src/livepeer_ai/models/components/body_genllm.py
 create mode 100644 src/livepeer_ai/models/components/llmresponse.py
 create mode 100644 src/livepeer_ai/models/operations/genllm.py

diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock
index 6110cdc..6090f7c 100644
--- a/.speakeasy/gen.lock
+++ b/.speakeasy/gen.lock
@@ -1,12 +1,11 @@
 lockVersion: 2.0.0
 id: 2d5dbf5a-62be-411a-9c7b-bc7b6dc79e13
 management:
-  docChecksum: 4a28bceb165adf1c1dd97bbc8fe41a27
-  docVersion: v0.5.0
-  speakeasyVersion: 1.401.2
-  generationVersion: 2.421.3
-  releaseVersion: 0.5.1
-  configChecksum: 57016dfe6f67d305306547b9aa2c1601
+  docChecksum: 21334b8071b6c8e28a5dda229ceafd7e
+  speakeasyVersion: 1.405.6
+  generationVersion: 2.428.1
+  releaseVersion: 0.5.2
+  configChecksum: 7327f5aa5f764c498c18bf07efe8face
   repoURL: https://github.com/livepeer/livepeer-ai-python.git
   installationURL: https://github.com/livepeer/livepeer-ai-python.git
   published: true
@@ -14,7 +13,7 @@ features:
   python:
     additionalDependencies: 1.0.0
     constsAndDefaults: 1.0.3
-    core: 5.5.4
+    core: 5.5.7
    defaultEnabledRetries: 0.2.0
     envVarSecurityUsage: 0.3.1
     globalSecurity: 3.0.2
@@ -23,7 +22,7 @@
     globalServerURLs: 3.0.0
     multipartFileContentType: 1.0.0
     nameOverrides: 3.0.0
-    responseFormat: 1.0.0
+    responseFormat: 1.0.1
     retries: 3.0.2
     sdkHooks: 1.0.0
     unions: 3.0.2
@@ -39,6 +38,7 @@ generatedFiles:
   - docs/models/components/bodygenimagetoimage.md
   - docs/models/components/bodygenimagetovideo.md
   - docs/models/components/bodygenimagetovideoimage.md
+  - docs/models/components/bodygenllm.md
   - docs/models/components/bodygensegmentanything2.md
   - docs/models/components/bodygensegmentanything2image.md
   - docs/models/components/bodygenupscale.md
@@ -47,6 +47,7 @@
   - docs/models/components/httpmetadata.md
   - docs/models/components/image.md
   - docs/models/components/imageresponse.md
+  - docs/models/components/llmresponse.md
   - docs/models/components/loc.md
   - docs/models/components/masksresponse.md
   - docs/models/components/media.md
@@ -60,6 +61,7 @@
   - docs/models/operations/genaudiototextresponse.md
   - docs/models/operations/genimagetoimageresponse.md
   - docs/models/operations/genimagetovideoresponse.md
+  - docs/models/operations/genllmresponse.md
   - docs/models/operations/gensegmentanything2response.md
   - docs/models/operations/gentexttoimageresponse.md
   - docs/models/operations/genupscaleresponse.md
@@ -85,11 +87,13 @@
   - src/livepeer_ai/models/components/body_genaudiototext.py
   - src/livepeer_ai/models/components/body_genimagetoimage.py
   - src/livepeer_ai/models/components/body_genimagetovideo.py
+  - src/livepeer_ai/models/components/body_genllm.py
   - src/livepeer_ai/models/components/body_gensegmentanything2.py
   - src/livepeer_ai/models/components/body_genupscale.py
   - src/livepeer_ai/models/components/chunk.py
   - src/livepeer_ai/models/components/httpmetadata.py
   - src/livepeer_ai/models/components/imageresponse.py
+  - src/livepeer_ai/models/components/llmresponse.py
   - src/livepeer_ai/models/components/masksresponse.py
   - src/livepeer_ai/models/components/media.py
   - src/livepeer_ai/models/components/security.py
@@ -105,6 +109,7 @@
   - src/livepeer_ai/models/operations/genaudiototext.py
   - src/livepeer_ai/models/operations/genimagetoimage.py
   - src/livepeer_ai/models/operations/genimagetovideo.py
+  - src/livepeer_ai/models/operations/genllm.py
   - src/livepeer_ai/models/operations/gensegmentanything2.py
   - src/livepeer_ai/models/operations/gentexttoimage.py
   - src/livepeer_ai/models/operations/genupscale.py
@@ -189,3 +194,13 @@ examples:
         "400":
           application/json: {"detail": {"msg": ""}}
         "422": {}
+  genLLM:
+    speakeasy-default-gen-LLM:
+      requestBody:
+        application/x-www-form-urlencoded: {"prompt": ""}
+      responses:
+        "200":
+          application/json: {"response": "", "tokens_used": 60712}
+        "400":
+          application/json: {"detail": {"msg": ""}}
+        "422": {}
diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml
index 4346a2d..5ef80af 100644
--- a/.speakeasy/gen.yaml
+++ b/.speakeasy/gen.yaml
@@ -12,7 +12,7 @@ generation:
   auth:
     oAuth2ClientCredentialsEnabled: true
 python:
-  version: 0.5.1
+  version: 0.5.2
   additionalDependencies:
     dev: {}
     main: {}
@@ -22,6 +22,8 @@ python:
   clientServerStatusCodesAsErrors: true
   description: Python Client SDK for the Livepeer AI API.
   enumFormat: enum
+  fixFlags:
+    responseRequiredSep2024: false
   flattenGlobalSecurity: true
   flattenRequests: false
   imports:
diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock
index e711b12..f7c21a4 100644
--- a/.speakeasy/workflow.lock
+++ b/.speakeasy/workflow.lock
@@ -1,9 +1,9 @@
-speakeasyVersion: 1.401.2
+speakeasyVersion: 1.405.6
 sources:
     livepeer_ai-OAS:
         sourceNamespace: livepeer-ai-oas
-        sourceRevisionDigest: sha256:4f6e897095eeeb8d23e6ecc1421334cf273ffd96b45a1e6e79bf04b39d5ce543
-        sourceBlobDigest: sha256:ffdfa01671cab29ecdb8b014925f080497ace7616108fe5a4b9153cc0e562f93
+        sourceRevisionDigest: sha256:6c083bb976ce725193fa116da1525e922c5cc008dc6f138ca265003b272d1667
+        sourceBlobDigest: sha256:1381f76cad0ec28b51d5559fdf87a0b14f39e2946fe25e0c21c0b8b0fe42cf99
         tags:
             - latest
             - main
@@ -16,10 +16,10 @@ targets:
     livepeer-ai-python:
         source: livepeer_ai-OAS
         sourceNamespace: livepeer-ai-oas
-        sourceRevisionDigest: sha256:4f6e897095eeeb8d23e6ecc1421334cf273ffd96b45a1e6e79bf04b39d5ce543
-        sourceBlobDigest: sha256:ffdfa01671cab29ecdb8b014925f080497ace7616108fe5a4b9153cc0e562f93
+        sourceRevisionDigest: sha256:6c083bb976ce725193fa116da1525e922c5cc008dc6f138ca265003b272d1667
+        sourceBlobDigest: sha256:1381f76cad0ec28b51d5559fdf87a0b14f39e2946fe25e0c21c0b8b0fe42cf99
         codeSamplesNamespace: code-samples-python-livepeer-python
-        codeSamplesRevisionDigest: sha256:196012791664b5a4437be3bf2d09ed1a3b6ee7bd83da508a3def8332836f5b31
+        codeSamplesRevisionDigest: sha256:2a3c5eac4d47d04d2509592307bd0e451ce09ecacdba64a6638329f2f3286f76
 workflow:
     workflowVersion: 1.0.0
     speakeasyVersion: latest
diff --git a/README.md b/README.md
index 2c4aedb..a7951ff 100644
--- a/README.md
+++ b/README.md
@@ -100,6 +100,7 @@ asyncio.run(main())
 * [upscale](docs/sdks/generate/README.md#upscale) - Upscale
 * [audio_to_text](docs/sdks/generate/README.md#audio_to_text) - Audio To Text
 * [segment_anything2](docs/sdks/generate/README.md#segment_anything2) - Segment Anything 2
+* [llm](docs/sdks/generate/README.md#llm) - LLM
 
 
 
@@ -186,13 +187,24 @@ if res.image_response is not None:
 
 ## Error Handling
 
-Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type.
+Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an exception.
 
-| Error Object               | Status Code                | Content Type               |
+By default, an API error will raise an errors.SDKError exception, which has the following properties:
+
+| Property        | Type             | Description           |
+|-----------------|------------------|-----------------------|
+| `.status_code`  | *int*            | The HTTP status code  |
+| `.message`      | *str*            | The error message     |
+| `.raw_response` | *httpx.Response* | The raw HTTP response |
+| `.body`         | *str*            | The response content  |
+
+When custom error responses are specified for an operation, the SDK may also raise their associated exceptions. You can refer to respective *Errors* tables in SDK docs for more details on possible exception types for each operation. For example, the `text_to_image_async` method may raise the following exceptions:
+
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,500                | application/json           |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
 
 ### Example
 
diff --git a/RELEASES.md b/RELEASES.md
index ea00be4..41a43ea 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -78,4 +78,14 @@ Based on:
 ### Generated
 - [python v0.5.1] .
 ### Releases
-- [PyPI v0.5.1] https://pypi.org/project/livepeer-ai/0.5.1 - .
\ No newline at end of file
+- [PyPI v0.5.1] https://pypi.org/project/livepeer-ai/0.5.1 - .
+
+## 2024-10-01 20:49:55
+### Changes
+Based on:
+- OpenAPI Doc 
+- Speakeasy CLI 1.405.6 (2.428.1) https://github.com/speakeasy-api/speakeasy
+### Generated
+- [python v0.5.2] .
+### Releases
+- [PyPI v0.5.2] https://pypi.org/project/livepeer-ai/0.5.2 - .
\ No newline at end of file
diff --git a/codeSamples.yaml b/codeSamples.yaml
index 4bbe33d..b2d35c3 100644
--- a/codeSamples.yaml
+++ b/codeSamples.yaml
@@ -70,6 +70,25 @@ actions:
             if res.video_response is not None:
                 # handle response
                 pass
+  - target: $["paths"]["/llm"]["post"]
+    update:
+      x-codeSamples:
+        - lang: python
+          label: genLLM
+          source: |-
+            from livepeer_ai import Livepeer
+
+            s = Livepeer(
+                http_bearer="",
+            )
+
+            res = s.generate.llm(request={
+                "prompt": "",
+            })
+
+            if res.llm_response is not None:
+                # handle response
+                pass
   - target: $["paths"]["/segment-anything-2"]["post"]
     update:
       x-codeSamples:
diff --git a/docs/models/components/bodygenllm.md b/docs/models/components/bodygenllm.md
new file mode 100644
index 0000000..5bbdda0
--- /dev/null
+++ b/docs/models/components/bodygenllm.md
@@ -0,0 +1,14 @@
+# BodyGenLLM
+
+
+## Fields
+
+| Field              | Type               | Required           | Description        |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `prompt`           | *str*              | :heavy_check_mark: | N/A                |
+| `model_id`         | *Optional[str]*    | :heavy_minus_sign: | N/A                |
+| `system_msg`       | *Optional[str]*    | :heavy_minus_sign: | N/A                |
+| `temperature`      | *Optional[float]*  | :heavy_minus_sign: | N/A                |
+| `max_tokens`       | *Optional[int]*    | :heavy_minus_sign: | N/A                |
+| `history`          | *Optional[str]*    | :heavy_minus_sign: | N/A                |
+| `stream`           | *Optional[bool]*   | :heavy_minus_sign: | N/A                |
\ No newline at end of file
diff --git a/docs/models/components/llmresponse.md b/docs/models/components/llmresponse.md
new file mode 100644
index 0000000..d7c47f6
--- /dev/null
+++ b/docs/models/components/llmresponse.md
@@ -0,0 +1,9 @@
+# LLMResponse
+
+
+## Fields
+
+| Field              | Type               | Required           | Description        |
+| ------------------ | ------------------ | ------------------ | ------------------ |
+| `response`         | *str*              | :heavy_check_mark: | N/A                |
+| `tokens_used`      | *int*              | :heavy_check_mark: | N/A                |
\ No newline at end of file
diff --git a/docs/models/errors/httpvalidationerror.md b/docs/models/errors/httpvalidationerror.md
index 1812a6a..5cf28a6 100644
--- a/docs/models/errors/httpvalidationerror.md
+++ b/docs/models/errors/httpvalidationerror.md
@@ -1,7 +1,5 @@
 # HTTPValidationError
 
-Validation Error
-
 
 ## Fields
 
diff --git a/docs/models/operations/genllmresponse.md b/docs/models/operations/genllmresponse.md
new file mode 100644
index 0000000..74e92e1
--- /dev/null
+++ b/docs/models/operations/genllmresponse.md
@@ -0,0 +1,9 @@
+# GenLLMResponse
+
+
+## Fields
+
+| Field                                                                        | Type                                                                         | Required                                                                     | Description                                                                  |
+| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
+| `http_meta`                                                                  | [components.HTTPMetadata](../../models/components/httpmetadata.md)          | :heavy_check_mark:                                                           | N/A                                                                          |
+| `llm_response`                                                               | [Optional[components.LLMResponse]](../../models/components/llmresponse.md)  | :heavy_minus_sign:                                                           | Successful Response                                                          |
\ No newline at end of file
diff --git a/docs/sdks/generate/README.md b/docs/sdks/generate/README.md
index 59b131f..122bcd0 100644
--- a/docs/sdks/generate/README.md
+++ b/docs/sdks/generate/README.md
@@ -11,6 +11,7 @@
 * [upscale](#upscale) - Upscale
 * [audio_to_text](#audio_to_text) - Audio To Text
 * [segment_anything2](#segment_anything2) - Segment Anything 2
+* [llm](#llm) - LLM
 
 ## text_to_image
 
@@ -48,12 +49,11 @@ if res.image_response is not None:
 
 ### Errors
 
-| Error Object               | Status Code                | Content Type               |
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,500                | application/json           |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
-
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
 
 ## image_to_image
 
@@ -95,12 +95,11 @@ if res.image_response is not None:
 
 ### Errors
 
-| Error Object               | Status Code                | Content Type               |
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,500                | application/json           |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
-
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
 
 ## image_to_video
 
@@ -141,12 +140,11 @@ if res.video_response is not None:
 
 ### Errors
 
-| Error Object               | Status Code                | Content Type               |
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,500                | application/json           |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
-
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
 
 ## upscale
 
@@ -188,12 +186,11 @@ if res.image_response is not None:
 
 ### Errors
 
-| Error Object               | Status Code                | Content Type               |
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,500                | application/json           |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
-
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
 
 ## audio_to_text
 
@@ -234,12 +231,11 @@ if res.text_response is not None:
 
 ### Errors
 
-| Error Object               | Status Code                | Content Type               |
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,413,500            | application/json           |
+| errors.HTTPError           | 400, 401, 413, 500         | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
-
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
 
 ## segment_anything2
 
@@ -280,8 +276,50 @@ if res.masks_response is not None:
 
 ### Errors
 
-| Error Object               | Status Code                | Content Type               |
+| Error Type                 | Status Code                | Content Type               |
+| -------------------------- | -------------------------- | -------------------------- |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
+| errors.HTTPValidationError | 422                        | application/json           |
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
+
+## llm
+
+Generate text using a language model.
+
+### Example Usage
+
+```python
+from livepeer_ai import Livepeer
+
+s = Livepeer(
+    http_bearer="",
+)
+
+res = s.generate.llm(request={
+    "prompt": "",
+})
+
+if res.llm_response is not None:
+    # handle response
+    pass
+
+```
+
+### Parameters
+
+| Parameter                                                            | Type                                                                 | Required                                                             | Description                                                          |
+| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- |
+| `request`                                                            | [components.BodyGenLLM](../../models/components/bodygenllm.md)       | :heavy_check_mark:                                                   | The request object to use for the request.                           |
+| `retries`                                                            | [Optional[utils.RetryConfig]](../../models/utils/retryconfig.md)     | :heavy_minus_sign:                                                   | Configuration to override the default retry behavior of the client.  |
+
+### Response
+
+**[operations.GenLLMResponse](../../models/operations/genllmresponse.md)**
+
+### Errors
+
+| Error Type                 | Status Code                | Content Type               |
 | -------------------------- | -------------------------- | -------------------------- |
-| errors.HTTPError           | 400,401,500                | application/json           |
+| errors.HTTPError           | 400, 401, 500              | application/json           |
 | errors.HTTPValidationError | 422                        | application/json           |
-| errors.SDKError            | 4xx-5xx                    | */*                        |
+| errors.SDKError            | 4XX, 5XX                   | \*/\*                      |
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index afad163..2c45817 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "livepeer-ai"
-version = "0.5.1"
+version = "0.5.2"
 description = "Python Client SDK for the Livepeer AI API."
 authors = ["Speakeasy",]
 readme = "README-PYPI.md"
diff --git a/src/livepeer_ai/generate.py b/src/livepeer_ai/generate.py
index f5878ef..e803f6e 100644
--- a/src/livepeer_ai/generate.py
+++ b/src/livepeer_ai/generate.py
@@ -1150,3 +1150,189 @@ async def segment_anything2_async(
             http_res.text,
             http_res,
         )
+
+    def llm(
+        self,
+        *,
+        request: Union[components.BodyGenLLM, components.BodyGenLLMTypedDict],
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+    ) -> operations.GenLLMResponse:
+        r"""LLM
+
+        Generate text using a language model.
+
+        :param request: The request object to send.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+
+        if not isinstance(request, BaseModel):
+            request = utils.unmarshal(request, components.BodyGenLLM)
+        request = cast(components.BodyGenLLM, request)
+
+        req = self.build_request(
+            method="POST",
+            path="/llm",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "form", components.BodyGenLLM
+            ),
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = self.do_request(
+            hook_ctx=HookContext(
+                operation_id="genLLM",
+                oauth2_scopes=[],
+                security_source=self.sdk_configuration.security,
+            ),
+            request=req,
+            error_status_codes=["400", "401", "422", "4XX", "500", "5XX"],
+            retry_config=retry_config,
+        )
+
+        data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return operations.GenLLMResponse(
+                llm_response=utils.unmarshal_json(
+                    http_res.text, Optional[components.LLMResponse]
+                ),
+                http_meta=components.HTTPMetadata(request=req, response=http_res),
+            )
+        if utils.match_response(http_res, ["400", "401", "500"], "application/json"):
+            data = utils.unmarshal_json(http_res.text, errors.HTTPErrorData)
+            raise errors.HTTPError(data=data)
+        if utils.match_response(http_res, "422", "application/json"):
+            data = utils.unmarshal_json(http_res.text, errors.HTTPValidationErrorData)
+            raise errors.HTTPValidationError(data=data)
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise errors.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
+        content_type = http_res.headers.get("Content-Type")
+        raise errors.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
+
+    async def llm_async(
+        self,
+        *,
+        request: Union[components.BodyGenLLM, components.BodyGenLLMTypedDict],
+        retries: OptionalNullable[utils.RetryConfig] = UNSET,
+        server_url: Optional[str] = None,
+        timeout_ms: Optional[int] = None,
+    ) -> operations.GenLLMResponse:
+        r"""LLM
+
+        Generate text using a language model.
+
+        :param request: The request object to send.
+        :param retries: Override the default retry configuration for this method
+        :param server_url: Override the default server URL for this method
+        :param timeout_ms: Override the default request timeout configuration for this method in milliseconds
+        """
+        base_url = None
+        url_variables = None
+        if timeout_ms is None:
+            timeout_ms = self.sdk_configuration.timeout_ms
+
+        if server_url is not None:
+            base_url = server_url
+
+        if not isinstance(request, BaseModel):
+            request = utils.unmarshal(request, components.BodyGenLLM)
+        request = cast(components.BodyGenLLM, request)
+
+        req = self.build_request_async(
+            method="POST",
+            path="/llm",
+            base_url=base_url,
+            url_variables=url_variables,
+            request=request,
+            request_body_required=True,
+            request_has_path_params=False,
+            request_has_query_params=True,
+            user_agent_header="user-agent",
+            accept_header_value="application/json",
+            security=self.sdk_configuration.security,
+            get_serialized_body=lambda: utils.serialize_request_body(
+                request, False, False, "form", components.BodyGenLLM
+            ),
+            timeout_ms=timeout_ms,
+        )
+
+        if retries == UNSET:
+            if self.sdk_configuration.retry_config is not UNSET:
+                retries = self.sdk_configuration.retry_config
+
+        retry_config = None
+        if isinstance(retries, utils.RetryConfig):
+            retry_config = (retries, ["429", "500", "502", "503", "504"])
+
+        http_res = await self.do_request_async(
+            hook_ctx=HookContext(
+                operation_id="genLLM",
+                oauth2_scopes=[],
+                security_source=self.sdk_configuration.security,
+            ),
+            request=req,
+            error_status_codes=["400", "401", "422", "4XX", "500", "5XX"],
+            retry_config=retry_config,
+        )
+
+        data: Any = None
+        if utils.match_response(http_res, "200", "application/json"):
+            return operations.GenLLMResponse(
+                llm_response=utils.unmarshal_json(
+                    http_res.text, Optional[components.LLMResponse]
+                ),
+                http_meta=components.HTTPMetadata(request=req, response=http_res),
+            )
+        if utils.match_response(http_res, ["400", "401", "500"], "application/json"):
+            data = utils.unmarshal_json(http_res.text, errors.HTTPErrorData)
+            raise errors.HTTPError(data=data)
+        if utils.match_response(http_res, "422", "application/json"):
+            data = utils.unmarshal_json(http_res.text, errors.HTTPValidationErrorData)
+            raise errors.HTTPValidationError(data=data)
+        if utils.match_response(http_res, ["4XX", "5XX"], "*"):
+            raise errors.SDKError(
+                "API error occurred", http_res.status_code, http_res.text, http_res
+            )
+
+        content_type = http_res.headers.get("Content-Type")
+        raise errors.SDKError(
+            f"Unexpected response received (code: {http_res.status_code}, type: {content_type})",
+            http_res.status_code,
+            http_res.text,
+            http_res,
+        )
diff --git a/src/livepeer_ai/models/components/__init__.py b/src/livepeer_ai/models/components/__init__.py
index 1dceb67..093587b 100644
--- a/src/livepeer_ai/models/components/__init__.py
+++ b/src/livepeer_ai/models/components/__init__.py
@@ -19,6 +19,7 @@
     BodyGenImageToVideoImageTypedDict,
     BodyGenImageToVideoTypedDict,
 )
+from .body_genllm import BodyGenLLM, BodyGenLLMTypedDict
 from .body_gensegmentanything2 import (
     BodyGenSegmentAnything2,
     BodyGenSegmentAnything2Image,
@@ -34,6 +35,7 @@
 from .chunk import Chunk, ChunkTypedDict
 from .httpmetadata import HTTPMetadata, HTTPMetadataTypedDict
 from .imageresponse import ImageResponse, ImageResponseTypedDict
+from .llmresponse import LLMResponse, LLMResponseTypedDict
 from .masksresponse import MasksResponse, MasksResponseTypedDict
 from .media import Media, MediaTypedDict
 from .security import Security, SecurityTypedDict
@@ -60,6 +62,8 @@
     "BodyGenImageToVideoImage",
     "BodyGenImageToVideoImageTypedDict",
     "BodyGenImageToVideoTypedDict",
+    "BodyGenLLM",
+    "BodyGenLLMTypedDict",
     "BodyGenSegmentAnything2",
     "BodyGenSegmentAnything2Image",
     "BodyGenSegmentAnything2ImageTypedDict",
@@ -76,6 +80,8 @@
     "ImageResponse",
     "ImageResponseTypedDict",
     "ImageTypedDict",
+    "LLMResponse",
+    "LLMResponseTypedDict",
     "Loc",
     "LocTypedDict",
     "MasksResponse",
diff --git a/src/livepeer_ai/models/components/body_genllm.py b/src/livepeer_ai/models/components/body_genllm.py
new file mode 100644
index 0000000..6efc188
--- /dev/null
+++ b/src/livepeer_ai/models/components/body_genllm.py
@@ -0,0 +1,33 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from livepeer_ai.types import BaseModel
+from livepeer_ai.utils import FieldMetadata
+from typing import Optional, TypedDict
+from typing_extensions import Annotated, NotRequired
+
+
+class BodyGenLLMTypedDict(TypedDict):
+    prompt: str
+    model_id: NotRequired[str]
+    system_msg: NotRequired[str]
+    temperature: NotRequired[float]
+    max_tokens: NotRequired[int]
+    history: NotRequired[str]
+    stream: NotRequired[bool]
+
+
+class BodyGenLLM(BaseModel):
+    prompt: Annotated[str, FieldMetadata(form=True)]
+
+    model_id: Annotated[Optional[str], FieldMetadata(form=True)] = ""
+
+    system_msg: Annotated[Optional[str], FieldMetadata(form=True)] = ""
+
+    temperature: Annotated[Optional[float], FieldMetadata(form=True)] = 0.7
+
+    max_tokens: Annotated[Optional[int], FieldMetadata(form=True)] = 256
+
+    history: Annotated[Optional[str], FieldMetadata(form=True)] = "[]"
+
+    stream: Annotated[Optional[bool], FieldMetadata(form=True)] = False
diff --git a/src/livepeer_ai/models/components/llmresponse.py b/src/livepeer_ai/models/components/llmresponse.py
new file mode 100644
index 0000000..3b165e7
--- /dev/null
+++ b/src/livepeer_ai/models/components/llmresponse.py
@@ -0,0 +1,16 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from livepeer_ai.types import BaseModel
+from typing import TypedDict
+
+
+class LLMResponseTypedDict(TypedDict):
+    response: str
+    tokens_used: int
+
+
+class LLMResponse(BaseModel):
+    response: str
+
+    tokens_used: int
diff --git a/src/livepeer_ai/models/errors/httpvalidationerror.py b/src/livepeer_ai/models/errors/httpvalidationerror.py
index 0b11c23..f3217e1 100644
--- a/src/livepeer_ai/models/errors/httpvalidationerror.py
+++ b/src/livepeer_ai/models/errors/httpvalidationerror.py
@@ -12,8 +12,6 @@ class HTTPValidationErrorData(BaseModel):
 
 
 class HTTPValidationError(Exception):
-    r"""Validation Error"""
-
     data: HTTPValidationErrorData
 
     def __init__(self, data: HTTPValidationErrorData):
diff --git a/src/livepeer_ai/models/operations/__init__.py b/src/livepeer_ai/models/operations/__init__.py
index df48b00..6984013 100644
--- a/src/livepeer_ai/models/operations/__init__.py
+++ b/src/livepeer_ai/models/operations/__init__.py
@@ -3,6 +3,7 @@
 from .genaudiototext import GenAudioToTextResponse, GenAudioToTextResponseTypedDict
 from .genimagetoimage import GenImageToImageResponse, GenImageToImageResponseTypedDict
 from .genimagetovideo import GenImageToVideoResponse, GenImageToVideoResponseTypedDict
+from .genllm import GenLLMResponse, GenLLMResponseTypedDict
 from .gensegmentanything2 import (
     GenSegmentAnything2Response,
     GenSegmentAnything2ResponseTypedDict,
@@ -17,6 +18,8 @@
     "GenImageToImageResponseTypedDict",
     "GenImageToVideoResponse",
     "GenImageToVideoResponseTypedDict",
+    "GenLLMResponse",
+    "GenLLMResponseTypedDict",
     "GenSegmentAnything2Response",
     "GenSegmentAnything2ResponseTypedDict",
     "GenTextToImageResponse",
diff --git a/src/livepeer_ai/models/operations/genllm.py b/src/livepeer_ai/models/operations/genllm.py
new file mode 100644
index 0000000..f2c420f
--- /dev/null
+++ b/src/livepeer_ai/models/operations/genllm.py
@@ -0,0 +1,26 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from livepeer_ai.models.components import (
+    httpmetadata as components_httpmetadata,
+    llmresponse as components_llmresponse,
+)
+from livepeer_ai.types import BaseModel
+import pydantic
+from typing import Optional, TypedDict
+from typing_extensions import Annotated, NotRequired
+
+
+class GenLLMResponseTypedDict(TypedDict):
+    http_meta: components_httpmetadata.HTTPMetadataTypedDict
+    llm_response: NotRequired[components_llmresponse.LLMResponseTypedDict]
+    r"""Successful Response"""
+
+
+class GenLLMResponse(BaseModel):
+    http_meta: Annotated[
+        Optional[components_httpmetadata.HTTPMetadata], pydantic.Field(exclude=True)
+    ] = None
+
+    llm_response: Optional[components_llmresponse.LLMResponse] = None
+    r"""Successful Response"""
diff --git a/src/livepeer_ai/sdkconfiguration.py b/src/livepeer_ai/sdkconfiguration.py
index ea2d376..fd3cb67 100644
--- a/src/livepeer_ai/sdkconfiguration.py
+++ b/src/livepeer_ai/sdkconfiguration.py
@@ -30,10 +30,10 @@ class SDKConfiguration:
     server_url: Optional[str] = ""
     server_idx: Optional[int] = 0
     language: str = "python"
-    openapi_doc_version: str = "v0.5.0"
-    sdk_version: str = "0.5.1"
-    gen_version: str = "2.421.3"
-    user_agent: str = "speakeasy-sdk/python 0.5.1 2.421.3 v0.5.0 livepeer-ai"
+    openapi_doc_version: str = ""
+    sdk_version: str = "0.5.2"
+    gen_version: str = "2.428.1"
+    user_agent: str = "speakeasy-sdk/python 0.5.2 2.428.1 livepeer-ai"
     retry_config: OptionalNullable[RetryConfig] = Field(default_factory=lambda: UNSET)
     timeout_ms: Optional[int] = None
 