Skip to content

Commit 48ab5e3

Browse files
committed
feat(langchain): add ruff rules SLF
1 parent 98bfd57 commit 48ab5e3

File tree

11 files changed

+24
-25
lines changed

11 files changed

+24
-25
lines changed

libs/langchain/langchain/agents/agent_iterator.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -190,14 +190,14 @@ def __iter__(self: AgentExecutorIterator) -> Iterator[AddableDict]:
190190
name=self.run_name,
191191
)
192192
try:
193-
while self.agent_executor._should_continue(
193+
while self.agent_executor._should_continue( # noqa: SLF001
194194
self.iterations,
195195
self.time_elapsed,
196196
):
197197
# take the next step: this plans next action, executes it,
198198
# yielding action and observation as they are generated
199199
next_step_seq: NextStepOutput = []
200-
for chunk in self.agent_executor._iter_next_step(
200+
for chunk in self.agent_executor._iter_next_step( # noqa: SLF001
201201
self.name_to_tool_map,
202202
self.color_mapping,
203203
self.inputs,
@@ -214,7 +214,7 @@ def __iter__(self: AgentExecutorIterator) -> Iterator[AddableDict]:
214214
yield AddableDict(steps=[chunk], messages=chunk.messages)
215215

216216
# convert iterator output to format handled by _process_next_step_output
217-
next_step = self.agent_executor._consume_next_step(next_step_seq)
217+
next_step = self.agent_executor._consume_next_step(next_step_seq) # noqa: SLF001
218218
# update iterations and time elapsed
219219
self.update_iterations()
220220
# decide if this is the final output
@@ -258,14 +258,14 @@ async def __aiter__(self) -> AsyncIterator[AddableDict]:
258258
)
259259
try:
260260
async with asyncio_timeout(self.agent_executor.max_execution_time):
261-
while self.agent_executor._should_continue(
261+
while self.agent_executor._should_continue( # noqa: SLF001
262262
self.iterations,
263263
self.time_elapsed,
264264
):
265265
# take the next step: this plans next action, executes it,
266266
# yielding action and observation as they are generated
267267
next_step_seq: NextStepOutput = []
268-
async for chunk in self.agent_executor._aiter_next_step(
268+
async for chunk in self.agent_executor._aiter_next_step( # noqa: SLF001
269269
self.name_to_tool_map,
270270
self.color_mapping,
271271
self.inputs,
@@ -288,7 +288,7 @@ async def __aiter__(self) -> AsyncIterator[AddableDict]:
288288
)
289289

290290
# convert iterator output to format handled by _process_next_step
291-
next_step = self.agent_executor._consume_next_step(next_step_seq)
291+
next_step = self.agent_executor._consume_next_step(next_step_seq) # noqa: SLF001
292292
# update iterations and time elapsed
293293
self.update_iterations()
294294
# decide if this is the final output
@@ -336,7 +336,7 @@ def _process_next_step_output(
336336
# Check for tool return
337337
if len(next_step_output) == 1:
338338
next_step_action = next_step_output[0]
339-
tool_return = self.agent_executor._get_tool_return(next_step_action)
339+
tool_return = self.agent_executor._get_tool_return(next_step_action) # noqa: SLF001
340340
if tool_return is not None:
341341
return self._return(tool_return, run_manager=run_manager)
342342

@@ -364,7 +364,7 @@ async def _aprocess_next_step_output(
364364
# Check for tool return
365365
if len(next_step_output) == 1:
366366
next_step_action = next_step_output[0]
367-
tool_return = self.agent_executor._get_tool_return(next_step_action)
367+
tool_return = self.agent_executor._get_tool_return(next_step_action) # noqa: SLF001
368368
if tool_return is not None:
369369
return await self._areturn(tool_return, run_manager=run_manager)
370370

@@ -376,7 +376,7 @@ def _stop(self, run_manager: CallbackManagerForChainRun) -> AddableDict:
376376
"""
377377
logger.warning("Stopping agent prematurely due to triggering stop condition")
378378
# this manually constructs agent finish with output key
379-
output = self.agent_executor._action_agent.return_stopped_response(
379+
output = self.agent_executor._action_agent.return_stopped_response( # noqa: SLF001
380380
self.agent_executor.early_stopping_method,
381381
self.intermediate_steps,
382382
**self.inputs,
@@ -389,7 +389,7 @@ async def _astop(self, run_manager: AsyncCallbackManagerForChainRun) -> AddableD
389389
the stopped response.
390390
"""
391391
logger.warning("Stopping agent prematurely due to triggering stop condition")
392-
output = self.agent_executor._action_agent.return_stopped_response(
392+
output = self.agent_executor._action_agent.return_stopped_response( # noqa: SLF001
393393
self.agent_executor.early_stopping_method,
394394
self.intermediate_steps,
395395
**self.inputs,
@@ -404,7 +404,7 @@ def _return(
404404
"""
405405
Return the final output of the iterator.
406406
"""
407-
returned_output = self.agent_executor._return(
407+
returned_output = self.agent_executor._return( # noqa: SLF001
408408
output,
409409
self.intermediate_steps,
410410
run_manager=run_manager,
@@ -421,7 +421,7 @@ async def _areturn(
421421
"""
422422
Return the final output of the async iterator.
423423
"""
424-
returned_output = await self.agent_executor._areturn(
424+
returned_output = await self.agent_executor._areturn( # noqa: SLF001
425425
output,
426426
self.intermediate_steps,
427427
run_manager=run_manager,

libs/langchain/langchain/agents/initialize.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ def initialize_agent(
9090
)
9191
with contextlib.suppress(NotImplementedError):
9292
# TODO: Add tags from the serialized object directly.
93-
tags_.append(agent_obj._agent_type)
93+
tags_.append(agent_obj._agent_type) # noqa: SLF001
9494
else:
9595
msg = (
9696
"Somehow both `agent` and `agent_path` are None, this should never happen."

libs/langchain/langchain/agents/openai_functions_agent/base.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -134,7 +134,7 @@ def plan(
134134
messages,
135135
callbacks=callbacks,
136136
)
137-
return self.output_parser._parse_ai_message(predicted_message)
137+
return self.output_parser.parse_ai_message(predicted_message)
138138

139139
async def aplan(
140140
self,
@@ -167,7 +167,7 @@ async def aplan(
167167
functions=self.functions,
168168
callbacks=callbacks,
169169
)
170-
return self.output_parser._parse_ai_message(predicted_message)
170+
return self.output_parser.parse_ai_message(predicted_message)
171171

172172
def return_stopped_response(
173173
self,

libs/langchain/langchain/agents/output_parsers/openai_functions.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ def _type(self) -> str:
3030
return "openai-functions-agent"
3131

3232
@staticmethod
33-
def _parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
33+
def parse_ai_message(message: BaseMessage) -> Union[AgentAction, AgentFinish]:
3434
"""Parse an AI message."""
3535
if not isinstance(message, AIMessage):
3636
msg = f"Expected an AI message got {type(message)}"
@@ -89,7 +89,7 @@ def parse_result(
8989
msg = "This output parser only works on ChatGeneration output"
9090
raise ValueError(msg)
9191
message = result[0].message
92-
return self._parse_ai_message(message)
92+
return self.parse_ai_message(message)
9393

9494
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
9595
msg = "Can only parse messages"

libs/langchain/langchain/chains/combine_documents/stuff.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,7 @@ def prompt_length(self, docs: list[Document], **kwargs: Any) -> Optional[int]:
240240
"""
241241
inputs = self._get_inputs(docs, **kwargs)
242242
prompt = self.llm_chain.prompt.format(**inputs)
243-
return self.llm_chain._get_num_tokens(prompt)
243+
return self.llm_chain.get_num_tokens(prompt)
244244

245245
def combine_docs(
246246
self,

libs/langchain/langchain/chains/conversational_retrieval/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,7 @@ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
388388
StuffDocumentsChain,
389389
):
390390
tokens = [
391-
self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
391+
self.combine_docs_chain.llm_chain.get_num_tokens(doc.page_content)
392392
for doc in docs
393393
]
394394
token_count = sum(tokens[:num_docs])

libs/langchain/langchain/chains/llm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -422,7 +422,7 @@ def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
422422
prompt_template = PromptTemplate.from_template(template)
423423
return cls(llm=llm, prompt=prompt_template)
424424

425-
def _get_num_tokens(self, text: str) -> int:
425+
def get_num_tokens(self, text: str) -> int:
426426
return _get_language_model(self.llm).get_num_tokens(text)
427427

428428

libs/langchain/langchain/chains/qa_with_sources/retrieval.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
3333
StuffDocumentsChain,
3434
):
3535
tokens = [
36-
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
36+
self.combine_documents_chain.llm_chain.get_num_tokens(doc.page_content)
3737
for doc in docs
3838
]
3939
token_count = sum(tokens[:num_docs])

libs/langchain/langchain/chains/qa_with_sources/vector_db.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
3838
StuffDocumentsChain,
3939
):
4040
tokens = [
41-
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
41+
self.combine_documents_chain.llm_chain.get_num_tokens(doc.page_content)
4242
for doc in docs
4343
]
4444
token_count = sum(tokens[:num_docs])

libs/langchain/langchain/output_parsers/combining.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,10 +23,10 @@ def validate_parsers(cls, values: dict[str, Any]) -> dict[str, Any]:
2323
msg = "Must have at least two parsers"
2424
raise ValueError(msg)
2525
for parser in parsers:
26-
if parser._type == "combining":
26+
if parser._type == "combining": # noqa: SLF001
2727
msg = "Cannot nest combining parsers"
2828
raise ValueError(msg)
29-
if parser._type == "list":
29+
if parser._type == "list": # noqa: SLF001
3030
msg = "Cannot combine list parsers"
3131
raise ValueError(msg)
3232
return values

0 commit comments

Comments (0)