Skip to content

Removed tool_names validation requiring gen_answer to be present #685

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Nov 14, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 1 addition & 7 deletions paperqa/agents/env.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,6 @@ def settings_to_tools(
summary_llm_model = summary_llm_model or settings.get_summary_llm()
embedding_model = embedding_model or settings.get_embedding_model()
tools: list[Tool] = []
has_answer_tool = False
for tool_type in (
(PaperSearch, GatherEvidence, GenerateAnswer)
if settings.agent.tool_names is None
Expand Down Expand Up @@ -86,14 +85,9 @@ def settings_to_tools(
else:
raise NotImplementedError(f"Didn't handle tool type {tool_type}.")
if tool.info.name == GenerateAnswer.gen_answer.__name__:
tools.append(tool)
has_answer_tool = True
tools.append(tool) # Place at the end
else:
tools.insert(0, tool)
if not has_answer_tool:
raise ValueError(
f"{GenerateAnswer.gen_answer.__name__} must be one of the tools."
)
return tools


Expand Down
15 changes: 0 additions & 15 deletions paperqa/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -523,21 +523,6 @@ class AgentSettings(BaseModel):
exclude=True,
)

@field_validator("tool_names")
@classmethod
def validate_tool_names(cls, v: set[str] | None) -> set[str] | None:
if v is None:
return None
# imported here to avoid circular imports
from paperqa.agents.tools import GenerateAnswer

answer_tool_name = GenerateAnswer.TOOL_FN_NAME
if answer_tool_name not in v:
raise ValueError(
f"If using an override, must contain at least the {answer_tool_name}."
)
return v

@model_validator(mode="after")
def _deprecated_field(self) -> Self:
for deprecated_field_name, new_name in (("index_concurrency", "concurrency"),):
Expand Down
26 changes: 3 additions & 23 deletions tests/test_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import time
from copy import deepcopy
from pathlib import Path
from typing import Any, cast
from typing import cast
from unittest.mock import AsyncMock, patch
from uuid import uuid4

Expand All @@ -21,7 +21,6 @@
from ldp.graph.memory import Memory, UIndexMemoryModel
from ldp.graph.ops import OpResult
from ldp.llms import EmbeddingModel, MultipleCompletionLLMModel
from pydantic import ValidationError
from pytest_subtests import SubTests
from tantivy import Index

Expand Down Expand Up @@ -402,8 +401,8 @@ async def test_propagate_options(agent_test_settings: Settings) -> None:
async def test_gather_evidence_rejects_empty_docs(
agent_test_settings: Settings,
) -> None:
# Patch GenerateAnswerTool._arun so that if this tool is chosen first, we
# don't give a 'cannot answer' response. A 'cannot answer' response can
# Patch GenerateAnswerTool.gen_answer so that if this tool is chosen first,
# we don't give a 'cannot answer' response. A 'cannot answer' response can
# lead to an unsure status, which will break this test's assertions. Since
# this test is about a GatherEvidenceTool edge case, defeating
# GenerateAnswerTool is fine
Expand Down Expand Up @@ -726,25 +725,6 @@ def test_answers_are_striped() -> None:
response.model_dump_json()


@pytest.mark.parametrize(
("kwargs", "result"),
[
({}, None),
({"tool_names": {GenerateAnswer.TOOL_FN_NAME}}, None),
({"tool_names": set()}, ValidationError),
({"tool_names": {PaperSearch.TOOL_FN_NAME}}, ValidationError),
],
)
def test_agent_prompt_collection_validations(
kwargs: dict[str, Any], result: type[Exception] | None
) -> None:
if result is None:
AgentSettings(**kwargs)
else:
with pytest.raises(result):
AgentSettings(**kwargs)


class TestGradablePaperQAEnvironment:
@pytest.mark.flaky(reruns=2, only_rerun=["AssertionError"])
@pytest.mark.asyncio
Expand Down