Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(client): add ._request_id property to object responses #1707

Merged
merged 2 commits into from
Sep 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -417,6 +417,24 @@ Error codes are as followed:
| >=500 | `InternalServerError` |
| N/A | `APIConnectionError` |

## Request IDs

> For more information on debugging requests, see [these docs](https://platform.openai.com/docs/api-reference/debugging-requests)

All object responses in the SDK provide a `_request_id` property which is added from the `x-request-id` response header so that you can quickly log failing requests and report them back to OpenAI.

```python
completion = await client.chat.completions.create(
messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
)
print(completion._request_id) # req_123
```

Note that unlike other properties that use an `_` prefix, the `_request_id` property
*is* public. Unless documented otherwise, *all* other `_` prefix properties,
methods and modules are *private*.


### Retries

Certain errors are automatically retried 2 times by default, with a short exponential backoff.
Expand Down
5 changes: 4 additions & 1 deletion src/openai/_legacy_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@

from ._types import NoneType
from ._utils import is_given, extract_type_arg, is_annotated_type
from ._models import BaseModel, is_basemodel
from ._models import BaseModel, is_basemodel, add_request_id
from ._constants import RAW_RESPONSE_HEADER
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
from ._exceptions import APIResponseValidationError
Expand Down Expand Up @@ -138,6 +138,9 @@ class MyModel(BaseModel):
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)

if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)

self._parsed_by_type[cache_key] = parsed
return parsed

Expand Down
34 changes: 33 additions & 1 deletion src/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

import os
import inspect
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
from datetime import date, datetime
from typing_extensions import (
Unpack,
Expand Down Expand Up @@ -94,6 +94,23 @@ def model_fields_set(self) -> set[str]:
class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated]
extra: Any = pydantic.Extra.allow # type: ignore

if TYPE_CHECKING:
_request_id: Optional[str] = None
"""The ID of the request, returned via the X-Request-ID header. Useful for debugging requests and reporting issues to OpenAI.

This will **only** be set for the top-level response object, it will not be defined for nested objects. For example:

```py
completion = await client.chat.completions.create(...)
completion._request_id # req_id_xxx
completion.usage._request_id # raises `AttributeError`
```

Note: unlike other properties that use an `_` prefix, this property
*is* public. Unless documented otherwise, all other `_` prefix properties,
methods and modules are *private*.
"""

def to_dict(
self,
*,
Expand Down Expand Up @@ -662,6 +679,21 @@ def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
setattr(typ, "__pydantic_config__", config) # noqa: B010


def add_request_id(obj: BaseModel, request_id: str | None) -> None:
    """Attach *request_id* to *obj* as the (intentionally public) `._request_id` attribute.

    On Pydantic v1 the attribute assigned via `setattr` would otherwise be
    picked up during model serialization, so it is explicitly registered in
    `__exclude_fields__` (along with the marker attribute itself).
    """
    obj._request_id = request_id

    if not PYDANTIC_V2:
        # Merge with any existing exclusions rather than clobbering them.
        current = getattr(obj, "__exclude_fields__", None)
        excluded = set(current or ()) | {"_request_id", "__exclude_fields__"}
        cast(Any, obj).__exclude_fields__ = excluded


# our use of subclasssing here causes weirdness for type checkers,
# so we just pretend that we don't subclass
if TYPE_CHECKING:
Expand Down
8 changes: 7 additions & 1 deletion src/openai/_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@

from ._types import NoneType
from ._utils import is_given, extract_type_arg, is_annotated_type, extract_type_var_from_base
from ._models import BaseModel, is_basemodel
from ._models import BaseModel, is_basemodel, add_request_id
from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER
from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type
from ._exceptions import OpenAIError, APIResponseValidationError
Expand Down Expand Up @@ -315,6 +315,9 @@ class MyModel(BaseModel):
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)

if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)

self._parsed_by_type[cache_key] = parsed
return parsed

Expand Down Expand Up @@ -419,6 +422,9 @@ class MyModel(BaseModel):
if is_given(self._options.post_parser):
parsed = self._options.post_parser(parsed)

if isinstance(parsed, BaseModel):
add_request_id(parsed, self.request_id)

self._parsed_by_type[cache_key] = parsed
return parsed

Expand Down
21 changes: 21 additions & 0 deletions tests/test_legacy_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,27 @@ def test_response_parse_custom_model(client: OpenAI) -> None:
assert obj.bar == 2


def test_response_basemodel_request_id(client: OpenAI) -> None:
    """Parsing a model from a legacy response exposes `._request_id` taken from
    the `x-request-id` header, without the field leaking into serialization."""
    raw = httpx.Response(
        200,
        headers={"x-request-id": "my-req-id"},
        content=json.dumps({"foo": "hello!", "bar": 2}),
    )
    response = LegacyAPIResponse(
        raw=raw,
        client=client,
        stream=False,
        stream_cls=None,
        cast_to=str,
        options=FinalRequestOptions.construct(method="get", url="/foo"),
    )

    model = response.parse(to=CustomModel)
    assert model._request_id == "my-req-id"
    assert model.foo == "hello!"
    assert model.bar == 2
    # `_request_id` must not appear in the serialized form.
    assert model.to_dict() == {"foo": "hello!", "bar": 2}


def test_response_parse_annotated_type(client: OpenAI) -> None:
response = LegacyAPIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
Expand Down
43 changes: 43 additions & 0 deletions tests/test_response.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,49 @@ async def test_async_response_parse_custom_model(async_client: AsyncOpenAI) -> N
assert obj.bar == 2


def test_response_basemodel_request_id(client: OpenAI) -> None:
    """Parsing a model from an `APIResponse` exposes `._request_id` taken from
    the `x-request-id` header, without the field leaking into serialization."""
    raw = httpx.Response(
        200,
        headers={"x-request-id": "my-req-id"},
        content=json.dumps({"foo": "hello!", "bar": 2}),
    )
    response = APIResponse(
        raw=raw,
        client=client,
        stream=False,
        stream_cls=None,
        cast_to=str,
        options=FinalRequestOptions.construct(method="get", url="/foo"),
    )

    model = response.parse(to=CustomModel)
    assert model._request_id == "my-req-id"
    assert model.foo == "hello!"
    assert model.bar == 2
    # `_request_id` must not appear in the serialized form.
    assert model.to_dict() == {"foo": "hello!", "bar": 2}


@pytest.mark.asyncio
async def test_async_response_basemodel_request_id(async_client: AsyncOpenAI) -> None:
    """Parsing a model from an `AsyncAPIResponse` exposes `._request_id` taken
    from the `x-request-id` header, without the field leaking into serialization.

    Uses the `async_client` fixture (not the sync `client`) so the async
    response is paired with the async client, consistent with the other async
    tests in this module (e.g. `test_async_response_parse_custom_model`).
    """
    response = AsyncAPIResponse(
        raw=httpx.Response(
            200,
            headers={"x-request-id": "my-req-id"},
            content=json.dumps({"foo": "hello!", "bar": 2}),
        ),
        client=async_client,
        stream=False,
        stream_cls=None,
        cast_to=str,
        options=FinalRequestOptions.construct(method="get", url="/foo"),
    )

    obj = await response.parse(to=CustomModel)
    assert obj._request_id == "my-req-id"
    assert obj.foo == "hello!"
    assert obj.bar == 2
    # `_request_id` must not appear in the serialized form.
    assert obj.to_dict() == {"foo": "hello!", "bar": 2}


def test_response_parse_annotated_type(client: OpenAI) -> None:
response = APIResponse(
raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})),
Expand Down