Skip to content

Commit

Permalink
feat(api): add uploads endpoints (openai#1568)
Browse files Browse the repository at this point in the history
  • Loading branch information
stainless-app[bot] authored and megamanics committed Aug 14, 2024
1 parent 5012f76 commit 1b5b349
Show file tree
Hide file tree
Showing 19 changed files with 1,272 additions and 2 deletions.
4 changes: 2 additions & 2 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
configured_endpoints: 64
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-518ca6c60061d3e8bc0971facf40d752f2aea62e3522cc168ad29a1f29cab3dd.yml
configured_endpoints: 68
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-77cfff37114bc9f141c7e6107eb5f1b38d8cc99bc3d4ce03a066db2b6b649c69.yml
26 changes: 26 additions & 0 deletions api.md
Original file line number Diff line number Diff line change
Expand Up @@ -415,3 +415,29 @@ Methods:
- <code title="get /batches/{batch_id}">client.batches.<a href="./src/openai/resources/batches.py">retrieve</a>(batch_id) -> <a href="./src/openai/types/batch.py">Batch</a></code>
- <code title="get /batches">client.batches.<a href="./src/openai/resources/batches.py">list</a>(\*\*<a href="src/openai/types/batch_list_params.py">params</a>) -> <a href="./src/openai/types/batch.py">SyncCursorPage[Batch]</a></code>
- <code title="post /batches/{batch_id}/cancel">client.batches.<a href="./src/openai/resources/batches.py">cancel</a>(batch_id) -> <a href="./src/openai/types/batch.py">Batch</a></code>

# Uploads

Types:

```python
from openai.types import Upload
```

Methods:

- <code title="post /uploads">client.uploads.<a href="./src/openai/resources/uploads/uploads.py">create</a>(\*\*<a href="src/openai/types/upload_create_params.py">params</a>) -> <a href="./src/openai/types/upload.py">Upload</a></code>
- <code title="post /uploads/{upload_id}/cancel">client.uploads.<a href="./src/openai/resources/uploads/uploads.py">cancel</a>(upload_id) -> <a href="./src/openai/types/upload.py">Upload</a></code>
- <code title="post /uploads/{upload_id}/complete">client.uploads.<a href="./src/openai/resources/uploads/uploads.py">complete</a>(upload_id, \*\*<a href="src/openai/types/upload_complete_params.py">params</a>) -> <a href="./src/openai/types/upload.py">Upload</a></code>

## Parts

Types:

```python
from openai.types.uploads import UploadPart
```

Methods:

- <code title="post /uploads/{upload_id}/parts">client.uploads.parts.<a href="./src/openai/resources/uploads/parts.py">create</a>(upload_id, \*\*<a href="src/openai/types/uploads/part_create_params.py">params</a>) -> <a href="./src/openai/types/uploads/upload_part.py">UploadPart</a></code>
8 changes: 8 additions & 0 deletions src/openai/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ class OpenAI(SyncAPIClient):
fine_tuning: resources.FineTuning
beta: resources.Beta
batches: resources.Batches
uploads: resources.Uploads
with_raw_response: OpenAIWithRawResponse
with_streaming_response: OpenAIWithStreamedResponse

Expand Down Expand Up @@ -143,6 +144,7 @@ def __init__(
self.fine_tuning = resources.FineTuning(self)
self.beta = resources.Beta(self)
self.batches = resources.Batches(self)
self.uploads = resources.Uploads(self)
self.with_raw_response = OpenAIWithRawResponse(self)
self.with_streaming_response = OpenAIWithStreamedResponse(self)

Expand Down Expand Up @@ -270,6 +272,7 @@ class AsyncOpenAI(AsyncAPIClient):
fine_tuning: resources.AsyncFineTuning
beta: resources.AsyncBeta
batches: resources.AsyncBatches
uploads: resources.AsyncUploads
with_raw_response: AsyncOpenAIWithRawResponse
with_streaming_response: AsyncOpenAIWithStreamedResponse

Expand Down Expand Up @@ -355,6 +358,7 @@ def __init__(
self.fine_tuning = resources.AsyncFineTuning(self)
self.beta = resources.AsyncBeta(self)
self.batches = resources.AsyncBatches(self)
self.uploads = resources.AsyncUploads(self)
self.with_raw_response = AsyncOpenAIWithRawResponse(self)
self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self)

Expand Down Expand Up @@ -483,6 +487,7 @@ def __init__(self, client: OpenAI) -> None:
self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning)
self.beta = resources.BetaWithRawResponse(client.beta)
self.batches = resources.BatchesWithRawResponse(client.batches)
self.uploads = resources.UploadsWithRawResponse(client.uploads)


class AsyncOpenAIWithRawResponse:
Expand All @@ -498,6 +503,7 @@ def __init__(self, client: AsyncOpenAI) -> None:
self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning)
self.beta = resources.AsyncBetaWithRawResponse(client.beta)
self.batches = resources.AsyncBatchesWithRawResponse(client.batches)
self.uploads = resources.AsyncUploadsWithRawResponse(client.uploads)


class OpenAIWithStreamedResponse:
Expand All @@ -513,6 +519,7 @@ def __init__(self, client: OpenAI) -> None:
self.fine_tuning = resources.FineTuningWithStreamingResponse(client.fine_tuning)
self.beta = resources.BetaWithStreamingResponse(client.beta)
self.batches = resources.BatchesWithStreamingResponse(client.batches)
self.uploads = resources.UploadsWithStreamingResponse(client.uploads)


class AsyncOpenAIWithStreamedResponse:
Expand All @@ -528,6 +535,7 @@ def __init__(self, client: AsyncOpenAI) -> None:
self.fine_tuning = resources.AsyncFineTuningWithStreamingResponse(client.fine_tuning)
self.beta = resources.AsyncBetaWithStreamingResponse(client.beta)
self.batches = resources.AsyncBatchesWithStreamingResponse(client.batches)
self.uploads = resources.AsyncUploadsWithStreamingResponse(client.uploads)


Client = OpenAI
Expand Down
14 changes: 14 additions & 0 deletions src/openai/resources/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,14 @@
BatchesWithStreamingResponse,
AsyncBatchesWithStreamingResponse,
)
from .uploads import (
Uploads,
AsyncUploads,
UploadsWithRawResponse,
AsyncUploadsWithRawResponse,
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)
from .embeddings import (
Embeddings,
AsyncEmbeddings,
Expand Down Expand Up @@ -156,4 +164,10 @@
"AsyncBatchesWithRawResponse",
"BatchesWithStreamingResponse",
"AsyncBatchesWithStreamingResponse",
"Uploads",
"AsyncUploads",
"UploadsWithRawResponse",
"AsyncUploadsWithRawResponse",
"UploadsWithStreamingResponse",
"AsyncUploadsWithStreamingResponse",
]
6 changes: 6 additions & 0 deletions src/openai/resources/chat/completions.py
Original file line number Diff line number Diff line change
Expand Up @@ -171,6 +171,7 @@ def create(
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier`
utilized.
Expand Down Expand Up @@ -366,6 +367,7 @@ def create(
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier`
utilized.
Expand Down Expand Up @@ -554,6 +556,7 @@ def create(
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier`
utilized.
Expand Down Expand Up @@ -817,6 +820,7 @@ async def create(
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier`
utilized.
Expand Down Expand Up @@ -1012,6 +1016,7 @@ async def create(
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier`
utilized.
Expand Down Expand Up @@ -1200,6 +1205,7 @@ async def create(
exhausted.
- If set to 'default', the request will be processed using the default service
tier with a lower uptime SLA and no latency guarantee.
- When not set, the default behavior is 'auto'.
When this parameter is set, the response body will include the `service_tier`
utilized.
Expand Down
33 changes: 33 additions & 0 deletions src/openai/resources/uploads/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from .parts import (
Parts,
AsyncParts,
PartsWithRawResponse,
AsyncPartsWithRawResponse,
PartsWithStreamingResponse,
AsyncPartsWithStreamingResponse,
)
from .uploads import (
Uploads,
AsyncUploads,
UploadsWithRawResponse,
AsyncUploadsWithRawResponse,
UploadsWithStreamingResponse,
AsyncUploadsWithStreamingResponse,
)

__all__ = [
"Parts",
"AsyncParts",
"PartsWithRawResponse",
"AsyncPartsWithRawResponse",
"PartsWithStreamingResponse",
"AsyncPartsWithStreamingResponse",
"Uploads",
"AsyncUploads",
"UploadsWithRawResponse",
"AsyncUploadsWithRawResponse",
"UploadsWithStreamingResponse",
"AsyncUploadsWithStreamingResponse",
]
188 changes: 188 additions & 0 deletions src/openai/resources/uploads/parts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,188 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Mapping, cast

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
from ..._utils import (
extract_files,
maybe_transform,
deepcopy_minimal,
async_maybe_transform,
)
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._base_client import make_request_options
from ...types.uploads import part_create_params
from ...types.uploads.upload_part import UploadPart

__all__ = ["Parts", "AsyncParts"]


class Parts(SyncAPIResource):
    """Synchronous resource for the `/uploads/{upload_id}/parts` endpoint."""

    @cached_property
    def with_raw_response(self) -> PartsWithRawResponse:
        """Return a view of this resource whose methods yield raw responses."""
        return PartsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> PartsWithStreamingResponse:
        """Return a view of this resource whose methods stream response bodies."""
        return PartsWithStreamingResponse(self)

    def create(
        self,
        upload_id: str,
        *,
        data: FileTypes,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> UploadPart:
        """Add a
        [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.

        A Part is one chunk of bytes of the file being uploaded. Each Part can be
        at most 64 MB, and Parts can be added until the 8 GB Upload maximum is hit.
        Multiple Parts may be uploaded in parallel; their intended order is chosen
        when the Upload is
        [completed](https://platform.openai.com/docs/api-reference/uploads/complete).

        Args:
          data: The chunk of bytes for this Part.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        payload = deepcopy_minimal({"data": data})
        extracted_files = extract_files(cast(Mapping[str, object], payload), paths=[["data"]])
        # The Content-Type actually sent to the server will also carry a
        # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        merged_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return self._post(
            f"/uploads/{upload_id}/parts",
            body=maybe_transform(payload, part_create_params.PartCreateParams),
            files=extracted_files,
            options=make_request_options(
                extra_headers=merged_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=UploadPart,
        )


class AsyncParts(AsyncAPIResource):
    """Asynchronous resource for the `/uploads/{upload_id}/parts` endpoint."""

    @cached_property
    def with_raw_response(self) -> AsyncPartsWithRawResponse:
        """Return a view of this resource whose methods yield raw responses."""
        return AsyncPartsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncPartsWithStreamingResponse:
        """Return a view of this resource whose methods stream response bodies."""
        return AsyncPartsWithStreamingResponse(self)

    async def create(
        self,
        upload_id: str,
        *,
        data: FileTypes,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> UploadPart:
        """Add a
        [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
        [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.

        A Part is one chunk of bytes of the file being uploaded. Each Part can be
        at most 64 MB, and Parts can be added until the 8 GB Upload maximum is hit.
        Multiple Parts may be uploaded in parallel; their intended order is chosen
        when the Upload is
        [completed](https://platform.openai.com/docs/api-reference/uploads/complete).

        Args:
          data: The chunk of bytes for this Part.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not upload_id:
            raise ValueError(f"Expected a non-empty value for `upload_id` but received {upload_id!r}")
        payload = deepcopy_minimal({"data": data})
        extracted_files = extract_files(cast(Mapping[str, object], payload), paths=[["data"]])
        # The Content-Type actually sent to the server will also carry a
        # `boundary` parameter, e.g. multipart/form-data; boundary=---abc--
        merged_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
        return await self._post(
            f"/uploads/{upload_id}/parts",
            body=await async_maybe_transform(payload, part_create_params.PartCreateParams),
            files=extracted_files,
            options=make_request_options(
                extra_headers=merged_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            ),
            cast_to=UploadPart,
        )


class PartsWithRawResponse:
    """Wraps a `Parts` resource, exposing `create` via the raw-response wrapper."""

    def __init__(self, parts: Parts) -> None:
        self._parts = parts
        self.create = _legacy_response.to_raw_response_wrapper(parts.create)


class AsyncPartsWithRawResponse:
    """Wraps an `AsyncParts` resource, exposing `create` via the async raw-response wrapper."""

    def __init__(self, parts: AsyncParts) -> None:
        self._parts = parts
        self.create = _legacy_response.async_to_raw_response_wrapper(parts.create)


class PartsWithStreamingResponse:
    """Wraps a `Parts` resource, exposing `create` via the streamed-response wrapper."""

    def __init__(self, parts: Parts) -> None:
        self._parts = parts
        self.create = to_streamed_response_wrapper(parts.create)


class AsyncPartsWithStreamingResponse:
    """Wraps an `AsyncParts` resource, exposing `create` via the async streamed-response wrapper."""

    def __init__(self, parts: AsyncParts) -> None:
        self._parts = parts
        self.create = async_to_streamed_response_wrapper(parts.create)
Loading

0 comments on commit 1b5b349

Please sign in to comment.