Skip to content

Commit cf2da86

Browse files
committed
Prevent *Is typing* from disappearing instantly while streaming
1 parent 4146ac4 commit cf2da86

File tree

1 file changed: +4 additions, −2 deletions

modules/text_generation.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,8 @@ def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typi
             reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k)
             yield formatted_outputs(reply, shared.model_name)
         else:
-            yield formatted_outputs(question, shared.model_name)
+            if not (shared.args.chat or shared.args.cai_chat):
+                yield formatted_outputs(question, shared.model_name)
             # RWKV has proper streaming, which is very nice.
             # No need to generate 8 tokens at a time.
             for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k):
@@ -197,7 +198,8 @@ def generate_with_callback(callback=None, **kwargs):
         def generate_with_streaming(**kwargs):
             return Iteratorize(generate_with_callback, kwargs, callback=None)

-        yield formatted_outputs(original_question, shared.model_name)
+        if not (shared.args.chat or shared.args.cai_chat):
+            yield formatted_outputs(original_question, shared.model_name)
         with generate_with_streaming(**generate_params) as generator:
             for output in generator:
                 if shared.soft_prompt:

0 commit comments

Comments (0)