Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 12 additions & 3 deletions src/openai/_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import os
import inspect
import weakref
import threading
from typing import (
IO,
TYPE_CHECKING,
Expand Down Expand Up @@ -54,7 +55,6 @@
is_list,
is_given,
json_safe,
lru_cache,
is_mapping,
parse_date,
coerce_boolean,
Expand Down Expand Up @@ -799,12 +799,21 @@ class GenericModel(BaseGenericModel, BaseModel):
if not PYDANTIC_V1:
from pydantic import TypeAdapter as _TypeAdapter

_CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
_type_adapter_cache: threading.local = threading.local()

def _get_cached_type_adapter(type_: type[_T]) -> _TypeAdapter[_T]:
    """Return a cached pydantic ``TypeAdapter`` for ``type_``.

    Adapters are memoised in a per-thread cache stored on a
    ``threading.local`` instance, so each thread builds and reuses its own
    adapters and the per-thread storage can be reclaimed when the thread
    exits (avoiding the unbounded global growth an ``lru_cache`` would have).

    Args:
        type_: the type to build (or fetch) a ``TypeAdapter`` for.

    Returns:
        The cached ``TypeAdapter`` instance for ``type_`` on the current
        thread; constructing one on first use.
    """
    # NOTE: the original used `getattr(..., "adapters", None) or {}` and then
    # unconditionally re-bound the attribute on every call — an empty-but-present
    # cache dict was discarded and a fresh one installed each time. Install the
    # per-thread dict exactly once instead.
    cache: "dict[type[Any], _TypeAdapter[Any]] | None" = getattr(
        _type_adapter_cache, "adapters", None
    )
    if cache is None:
        cache = {}
        _type_adapter_cache.adapters = cache
    adapter = cache.get(type_)
    if adapter is None:
        adapter = _TypeAdapter(type_)
        cache[type_] = adapter
    return adapter

if TYPE_CHECKING:
from pydantic import TypeAdapter
else:
TypeAdapter = _CachedTypeAdapter
TypeAdapter = _get_cached_type_adapter

def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
    """Validate ``value`` against a non-``BaseModel`` ``type_``.

    Looks up (or builds) the cached ``TypeAdapter`` for ``type_`` and runs
    pydantic's python-object validation on ``value``.
    """
    adapter = TypeAdapter(type_)
    return adapter.validate_python(value)
Expand Down
21 changes: 21 additions & 0 deletions tests/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -961,3 +961,24 @@ def __getattr__(self, attr: str) -> Item: ...
assert model.a.prop == 1
assert isinstance(model.a, Item)
assert model.other == "foo"


@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAdapter cache is only used in Pydantic v2")
def test_type_adapter_cache_is_thread_local() -> None:
    """Regression test for https://github.com/openai/openai-python/issues/2672

    The ``TypeAdapter`` cache lives on a ``threading.local`` so that each
    thread keeps its own adapter cache, which is released when the thread
    exits, rather than growing one process-wide cache forever.
    """
    import threading

    from openai._models import TypeAdapter, _type_adapter_cache

    # The cache container itself must be a thread-local object.
    assert isinstance(_type_adapter_cache, threading.local)

    # Within a thread, repeated requests for the same type share one adapter.
    first = TypeAdapter(int)
    second = TypeAdapter(int)
    assert first is second, "TypeAdapter should return cached instances for the same type"