Skip to content

Commit 2746064

Browse files
committed
fixes: expose flag to disable token counter (#11344)
* fixes: expose flag to disable token counter
* fix: add disable_token_counter
1 parent 68d1cb1 commit 2746064

File tree

4 files changed

+22
-3
lines changed

4 files changed

+22
-3
lines changed

litellm/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,7 @@
216216
ssl_verify: Union[str, bool] = True
217217
ssl_certificate: Optional[str] = None
218218
disable_streaming_logging: bool = False
219+
disable_token_counter: bool = False
219220
disable_add_transform_inline_image_block: bool = False
220221
in_memory_llm_clients_cache: LLMClientCache = LLMClientCache()
221222
safe_memory_mode: bool = False

litellm/litellm_core_utils/token_counter.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -362,6 +362,15 @@ def token_counter(
362362
"""
363363
from litellm.utils import convert_list_message_to_dict
364364

365+
#########################################################
366+
# Flag to disable token counter
367+
# We've gotten reports of this consuming CPU cycles,
368+
# exposing this flag to allow users to disable
369+
# it to confirm if this is indeed the issue
370+
#########################################################
371+
if litellm.disable_token_counter is True:
372+
return 0
373+
365374
verbose_logger.debug(
366375
f"messages in token_counter: {messages}, text in token_counter: {text}"
367376
)

litellm/proxy/proxy_config.yaml

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,5 @@ model_list:
44
model: openai/*
55

66

7-
8-
9-
7+
litellm_settings:
8+
disable_token_counter: True

litellm/utils.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1697,6 +1697,16 @@ def token_counter(
16971697
16981698
Kept for backwards compatibility.
16991699
"""
1700+
1701+
#########################################################
1702+
# Flag to disable token counter
1703+
# We've gotten reports of this consuming CPU cycles,
1704+
# exposing this flag to allow users to disable
1705+
# it to confirm if this is indeed the issue
1706+
#########################################################
1707+
if litellm.disable_token_counter is True:
1708+
return 0
1709+
17001710
return token_counter_new(
17011711
model,
17021712
custom_tokenizer,

0 commit comments

Comments (0)