@@ -270,6 +270,29 @@ def get_chat_template_by_model_path(model_path):
     )
 )
 
+register_chat_template(
+    ChatTemplate(
+        name="janus",
+        default_system_prompt=None,
+        role_prefix_and_suffix={
+            "system": (
+                "",
+                "",
+            ),
+            "user": (
+                "<|User|>",
+                "",
+            ),
+            "assistant": (
+                "<|Assistant|>",
+                "<|end▁of▁sentence|>",
+            ),
+        },
+        stop_str=("<|end▁of▁sentence|>",),
+        image_token="<image_placeholder>\n",
+    )
+)
+
 # The difference between "llama-3-instruct-llava" and "llama-3-instruct" is that llava uses a different image_token.
 register_chat_template(
     ChatTemplate(
@@ -395,6 +418,20 @@ def get_chat_template_by_model_path(model_path):
     )
 )
 
+# Adapted from https://huggingface.co/OpenGVLab/InternVL2-4B/blob/main/modeling_intern_vit.py
+register_chat_template(
+    ChatTemplate(
+        name="internvl-2-5",
+        default_system_prompt="你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。",
+        role_prefix_and_suffix={
+            "system": ("<|im_start|>system\n", "<|im_end|>\n"),
+            "user": ("<|im_start|>user\n", "<|im_end|>\n"),
+            "assistant": ("<|im_start|>assistant\n", "<|im_end|>\n"),
+        },
+        stop_str=["<|im_end|>", "<|action_end|>"],
+    )
+)
+
 register_chat_template(
     ChatTemplate(
         name="granite-3-instruct",
@@ -565,6 +602,13 @@ def match_gemma3_instruct(model_path: str):
     return get_chat_template("gemma-it")
 
 
+@register_chat_template_matching_function
+def match_internvl_chat(model_path: str):
+    model_path = model_path.lower()
+    if "internvl" in model_path:
+        return get_chat_template("internvl-2-5")
+
+
 if __name__ == "__main__":
     messages = [
         {"role": "system", "content": None},  # None means default