Skip to content

[Bump] Litellm responses format #12253

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Jul 2, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions litellm/llms/openai/responses/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from litellm.types.router import GenericLiteLLMParams

from ..common_utils import OpenAIError
from litellm.litellm_core_utils.llm_response_utils.convert_dict_to_response import _safe_convert_created_field

if TYPE_CHECKING:
from litellm.litellm_core_utils.litellm_logging import Logging as _LiteLLMLoggingObj
Expand Down Expand Up @@ -85,6 +86,7 @@ def transform_response_api_response(
"""No transform applied since outputs are in OpenAI spec already"""
try:
raw_response_json = raw_response.json()
raw_response_json["created_at"] = _safe_convert_created_field(raw_response_json["created_at"])
except Exception:
raise OpenAIError(
message=raw_response.text, status_code=raw_response.status_code
Expand Down
3 changes: 2 additions & 1 deletion litellm/types/llms/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -1010,7 +1010,7 @@ class ResponseAPIUsage(BaseLiteLLMOpenAIResponseObject):

class ResponsesAPIResponse(BaseLiteLLMOpenAIResponseObject):
id: str
created_at: float
created_at: int
error: Optional[dict]
incomplete_details: Optional[IncompleteDetails]
instructions: Optional[str]
Expand All @@ -1034,6 +1034,7 @@ class ResponsesAPIResponse(BaseLiteLLMOpenAIResponseObject):
truncation: Optional[Literal["auto", "disabled"]]
usage: Optional[ResponseAPIUsage]
user: Optional[str]
store: Optional[bool] = None
# Define private attributes using PrivateAttr
_hidden_params: dict = PrivateAttr(default_factory=dict)

Expand Down
6 changes: 3 additions & 3 deletions tests/llm_responses_api_testing/base_responses_api.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

import httpx
import json
import pytest
Expand Down Expand Up @@ -51,8 +50,8 @@ def validate_responses_api_response(response, final_chunk: bool = False):
response["id"], str
), "Response should have a string 'id' field"
assert "created_at" in response and isinstance(
response["created_at"], (int, float)
), "Response should have a numeric 'created_at' field"
response["created_at"], int
), "Response should have an integer 'created_at' field"
assert "output" in response and isinstance(
response["output"], list
), "Response should have a list 'output' field"
Expand Down Expand Up @@ -80,6 +79,7 @@ def validate_responses_api_response(response, final_chunk: bool = False):
"truncation": (str, type(None)),
"usage": ResponseAPIUsage,
"user": (str, type(None)),
"store": (bool, type(None)),
}
if final_chunk is False:
optional_fields["usage"] = type(None)
Expand Down
142 changes: 142 additions & 0 deletions tests/llm_responses_api_testing/test_openai_responses_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@
import asyncio
from typing import Optional, cast
from unittest.mock import patch, AsyncMock
import httpx
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
import time
import json

sys.path.insert(0, os.path.abspath("../.."))
import litellm
Expand Down Expand Up @@ -1152,3 +1157,140 @@ def test_mcp_tools_with_responses_api():
print(response_with_mcp_call)


@pytest.mark.asyncio
async def test_openai_responses_api_field_types():
    """Test that specific fields in the response have the correct types.

    Checks that ``created_at`` is normalized to an ``int`` and that the
    ``store`` attribute is present and echoes the request value.
    """
    litellm._turn_on_debug()
    litellm.set_verbose = True

    # Test with store=True explicitly set in the request.
    # BUGFIX: store=True was previously not passed, so the `response.store is True`
    # assertion below depended on the server-side default rather than the input.
    response = await litellm.aresponses(
        model="gpt-4o",
        input="hi",
        store=True,
    )

    # created_at must be normalized to an integer by the transformation layer
    assert isinstance(response.created_at, int), "created_at should be an integer"

    # Verify store field is present and matches the requested value
    assert hasattr(response, "store"), "store field should be present"
    assert response.store is True, "store field should match input value"

    # Test without the store parameter
    response_without_store = await litellm.aresponses(
        model="gpt-4o",
        input="hi",
    )

    # created_at is still an integer regardless of request params
    assert isinstance(
        response_without_store.created_at, int
    ), "created_at should be an integer"

    # store attribute exists even when not specified in the request
    # (its value may be None or a server-side default)
    assert hasattr(response_without_store, "store"), "store field should be present"


@pytest.mark.asyncio
async def test_store_field_transformation():
    """Test store field transformation with mocked API responses.

    Verifies that the ``store`` value returned by the API is passed through
    unchanged in all four cases (True / False / null / field omitted) and
    that ``created_at`` is always converted to an integer.
    """
    config = OpenAIResponsesAPIConfig()

    # Initialize logging object with required parameters
    logging_obj = LiteLLMLoggingObj(
        model="gpt-4o",
        messages=[],
        stream=False,
        call_type="aresponses",
        start_time=time.time(),
        litellm_call_id="test-call-id",
        function_id="test-function-id",
    )

    # Base response data with all fields required by ResponsesAPIResponse
    base_response = {
        "id": "test_id",
        "created_at": 1751443898,
        "model": "gpt-4o",
        "object": "response",
        "output": [
            {
                "type": "message",
                "id": "msg_1",
                "status": "completed",
                "role": "assistant",
                "content": [
                    {"type": "output_text", "text": "Hello", "annotations": []}
                ],
            }
        ],
        "parallel_tool_calls": True,
        "tool_choice": "auto",
        "tools": [],
        "error": None,
        "incomplete_details": None,
        "instructions": "test instructions",
        "metadata": {},
        "temperature": 0.7,
        "top_p": 1.0,
        "max_output_tokens": 100,
        "previous_response_id": None,
        "reasoning": None,
        "status": "completed",
        "text": None,
        "truncation": "auto",
        "usage": {"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
        "user": "test_user",
    }

    def _mock_response(overrides: dict) -> httpx.Response:
        # Build a 200 response whose JSON body is base_response + overrides.
        return httpx.Response(
            status_code=200,
            content=json.dumps({**base_response, **overrides}).encode(),
        )

    # Case 1: store=True in request, API returns store=True
    logging_obj.optional_params = {"store": True}
    response = config.transform_response_api_response(
        model="gpt-4o",
        raw_response=_mock_response({"store": True}),
        logging_obj=logging_obj,
    )
    assert response.store is True, "store should be True when specified in request and API returns True"

    # Case 2: store=False in request, API returns store=False
    logging_obj.optional_params = {"store": False}
    response = config.transform_response_api_response(
        model="gpt-4o",
        raw_response=_mock_response({"store": False}),
        logging_obj=logging_obj,
    )
    assert response.store is False, "store should be False when specified in request and API returns False"

    # BUGFIX: clear optional_params so the remaining cases genuinely test
    # "store not in request" — previously {"store": False} leaked from case 2.
    logging_obj.optional_params = {}

    # Case 3: store not in request, API returns null
    response = config.transform_response_api_response(
        model="gpt-4o",
        raw_response=_mock_response({"store": None}),
        logging_obj=logging_obj,
    )
    assert response.store is None, "store should be None when not specified in request and API returns null"

    # Case 4: store not in request, API omits the store field entirely
    response = config.transform_response_api_response(
        model="gpt-4o",
        raw_response=_mock_response({}),
        logging_obj=logging_obj,
    )
    assert response.store is None, "store should be None when not specified in request and API omits store"

    # Verify created_at is always converted to integer
    assert isinstance(response.created_at, int), "created_at should always be converted to integer"
    assert response.created_at == 1751443898, "created_at should maintain the same value after conversion"


Loading