"""Unit tests for the llm-matched-responses plugin."""
import json
from pathlib import Path
import llm
import pytest
from llm_matched_responses import MatchedResponsesModel
from llm_matched_responses import _try_parse_tool_calls
from llm_matched_responses import resolve_response


def test_default_echo() -> None:
    assert resolve_response("Hello world") == "Echo: Hello world"


def test_empty_message() -> None:
    assert resolve_response("") == "Echo: (empty message)"


def test_static_env_override(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", "Static reply")
    assert resolve_response("anything") == "Static reply"


def test_static_env_override_empty_input(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", "Always this")
    assert resolve_response("") == "Always this"


def test_static_env_takes_precedence_over_file(
    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    responses_file = tmp_path / "responses.json"
    responses_file.write_text(json.dumps({"hello": "from file"}))
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", "from env")
    monkeypatch.setenv("LLM_MATCHED_RESPONSES_FILE", str(responses_file))
    assert resolve_response("hello") == "from env"


def test_responses_file_substring_match(
    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    responses_file = tmp_path / "responses.json"
    responses_file.write_text(json.dumps({"hello": "Hi!", "help": "I can help."}))
    monkeypatch.setenv("LLM_MATCHED_RESPONSES_FILE", str(responses_file))
    assert resolve_response("hello world") == "Hi!"


def test_responses_file_no_match_falls_back(
    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    responses_file = tmp_path / "responses.json"
    responses_file.write_text(json.dumps({"hello": "Hi!"}))
    monkeypatch.setenv("LLM_MATCHED_RESPONSES_FILE", str(responses_file))
    assert resolve_response("goodbye") == "Echo: goodbye"


def test_responses_file_missing_falls_back(monkeypatch: pytest.MonkeyPatch) -> None:
    monkeypatch.setenv("LLM_MATCHED_RESPONSES_FILE", "/nonexistent/path.json")
    assert resolve_response("hello") == "Echo: hello"


def test_responses_file_invalid_json_raises(
    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    bad_file = tmp_path / "bad.json"
    bad_file.write_text("not valid json {{{")
    monkeypatch.setenv("LLM_MATCHED_RESPONSES_FILE", str(bad_file))
    with pytest.raises(json.JSONDecodeError):
        resolve_response("hello")
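

# The individual tests above pin down resolve_response's precedence order;
# the consolidated scenario below restates it in one place. Every assertion
# here is implied by those tests, so it adds no new claims about the plugin.
def test_resolution_precedence_consolidated(
    monkeypatch: pytest.MonkeyPatch, tmp_path: Path
) -> None:
    """Env override beats the responses file, which beats the echo fallback."""
    responses_file = tmp_path / "responses.json"
    responses_file.write_text(json.dumps({"hello": "from file"}))
    monkeypatch.setenv("LLM_MATCHED_RESPONSES_FILE", str(responses_file))
    # Substring match against the file applies while no env override is set.
    assert resolve_response("hello there") == "from file"
    # A static env override takes precedence over the file.
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", "from env")
    assert resolve_response("hello there") == "from env"
    # With the override gone and no key matching, the echo fallback applies.
    monkeypatch.delenv("LLM_MATCHED_RESPONSE")
    assert resolve_response("goodbye") == "Echo: goodbye"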


def test_supports_tools_attribute() -> None:
    model = MatchedResponsesModel()
    assert model.supports_tools is True


def test_try_parse_tool_calls_valid() -> None:
    reply = json.dumps({
        "tool_calls": [{"name": "my_tool", "arguments": {"x": 1}}],
        "text": "hello",
    })
    result = _try_parse_tool_calls(reply)
    assert result is not None
    assert len(result["tool_calls"]) == 1
    assert result["tool_calls"][0]["name"] == "my_tool"
    assert result["text"] == "hello"


def test_try_parse_tool_calls_plain_text() -> None:
    assert _try_parse_tool_calls("Echo: hello") is None


def test_try_parse_tool_calls_json_without_tool_calls() -> None:
    assert _try_parse_tool_calls(json.dumps({"foo": "bar"})) is None


def test_try_parse_tool_calls_not_a_dict() -> None:
    assert _try_parse_tool_calls(json.dumps([1, 2, 3])) is None
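

# For reference, the reply shape _try_parse_tool_calls recognizes, as pinned
# down by the tests above: a JSON object with a "tool_calls" list (and here a
# "text" field), e.g.
#
#   {"tool_calls": [{"name": "my_tool", "arguments": {"x": 1}}], "text": "hello"}
#
# Plain text, JSON arrays, and JSON objects without a "tool_calls" key all
# yield None. Whether "text" may be omitted is not covered by these tests.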


def test_execute_with_tools_returns_tool_calls(monkeypatch: pytest.MonkeyPatch) -> None:
    """When tools are provided and the reply contains tool_calls JSON, the model emits tool calls."""
    tool_response = json.dumps({
        "tool_calls": [{"name": "get_weather", "arguments": {"city": "SF"}}],
        "text": "Calling tool",
    })
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", tool_response)
    model = MatchedResponsesModel()
    response = model.prompt("what's the weather?", tools=[_dummy_tool()])
    assert response.text() == "Calling tool"
    assert len(response.tool_calls()) == 1
    assert response.tool_calls()[0].name == "get_weather"
    assert response.tool_calls()[0].arguments == {"city": "SF"}


def test_execute_with_tools_plain_text(monkeypatch: pytest.MonkeyPatch) -> None:
    """When tools are provided but the response is plain text, no tool calls are emitted."""
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", "just text")
    model = MatchedResponsesModel()
    response = model.prompt("hello", tools=[_dummy_tool()])
    assert response.text() == "just text"
    assert response.tool_calls() == []


def test_execute_without_tools_ignores_tool_calls_json(monkeypatch: pytest.MonkeyPatch) -> None:
    """When no tools are provided, tool_calls JSON is returned as plain text."""
    tool_response = json.dumps({
        "tool_calls": [{"name": "get_weather", "arguments": {}}],
    })
    monkeypatch.setenv("LLM_MATCHED_RESPONSE", tool_response)
    model = MatchedResponsesModel()
    response = model.prompt("hello")
    assert response.text() == tool_response
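

# Outside pytest, the same behavior can be driven through llm's model
# registry; a minimal sketch, assuming the plugin registers
# MatchedResponsesModel under the model id "matched-responses" (the id is
# not pinned down by these tests):
#
#   model = llm.get_model("matched-responses")
#   print(model.prompt("hello").text())  # -> "Echo: hello" with no overrides set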


def _dummy_tool() -> llm.Tool:
    """Create a minimal Tool for testing."""
    return llm.Tool(
        name="get_weather",
        description="Get weather for a city",
        input_schema={
            "type": "object",
            "properties": {"city": {"type": "string"}},
        },
        implementation=lambda city: f"Weather in {city}: sunny",
    )
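

# A quick sanity check on the fixture itself; this assumes llm.Tool keeps the
# implementation callable it is constructed with accessible as an attribute,
# and exercises it directly without going through the model.
def test_dummy_tool_implementation_runs() -> None:
    tool = _dummy_tool()
    assert tool.implementation(city="SF") == "Weather in SF: sunny"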