Skip to content

Commit c4ad5f6

Browse files
committed
Rollback change on _get_num_tokens
1 parent 48ab5e3 commit c4ad5f6

File tree

5 files changed: +5 additions, −5 deletions

libs/langchain/langchain/chains/combine_documents/stuff.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -240,7 +240,7 @@ def prompt_length(self, docs: list[Document], **kwargs: Any) -> Optional[int]:
240240
"""
241241
inputs = self._get_inputs(docs, **kwargs)
242242
prompt = self.llm_chain.prompt.format(**inputs)
243-
return self.llm_chain.get_num_tokens(prompt)
243+
return self.llm_chain._get_num_tokens(prompt) # noqa: SLF001
244244

245245
def combine_docs(
246246
self,

libs/langchain/langchain/chains/conversational_retrieval/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,7 @@ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
388388
StuffDocumentsChain,
389389
):
390390
tokens = [
391-
self.combine_docs_chain.llm_chain.get_num_tokens(doc.page_content)
391+
self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content) # noqa: SLF001
392392
for doc in docs
393393
]
394394
token_count = sum(tokens[:num_docs])

libs/langchain/langchain/chains/llm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -422,7 +422,7 @@ def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
422422
prompt_template = PromptTemplate.from_template(template)
423423
return cls(llm=llm, prompt=prompt_template)
424424

425-
def get_num_tokens(self, text: str) -> int:
425+
def _get_num_tokens(self, text: str) -> int:
426426
return _get_language_model(self.llm).get_num_tokens(text)
427427

428428

libs/langchain/langchain/chains/qa_with_sources/retrieval.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
3333
StuffDocumentsChain,
3434
):
3535
tokens = [
36-
self.combine_documents_chain.llm_chain.get_num_tokens(doc.page_content)
36+
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content) # noqa: SLF001
3737
for doc in docs
3838
]
3939
token_count = sum(tokens[:num_docs])

libs/langchain/langchain/chains/qa_with_sources/vector_db.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
3838
StuffDocumentsChain,
3939
):
4040
tokens = [
41-
self.combine_documents_chain.llm_chain.get_num_tokens(doc.page_content)
41+
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content) # noqa: SLF001
4242
for doc in docs
4343
]
4444
token_count = sum(tokens[:num_docs])

Comments (0)