Commit 646dad2

Merge pull request #4005 from Trojaner/fix-invalid-prompt-caching-on-lora-load
Fix generations that trigger a LoRA load not applying the text encoders of the just-loaded LoRAs to the triggering generation itself
2 parents 198382c + f3d6245 commit 646dad2

File tree

1 file changed: 2 additions & 1 deletion

modules/processing_args.py

Lines changed: 2 additions & 1 deletion
@@ -148,6 +148,8 @@ def set_pipeline_args(p, model, prompts:list, negative_prompts:list, prompts_2:t
     steps = kwargs.get("num_inference_steps", None) or len(getattr(p, 'timesteps', ['1']))
     clip_skip = kwargs.pop("clip_skip", 1)
 
+    extra_networks.activate(p, include=['text_encoder', 'text_encoder_2', 'text_encoder_3'])
+
     parser = 'fixed'
     prompt_attention = prompt_attention or shared.opts.prompt_attention
     if (prompt_attention != 'fixed') and ('Onnx' not in model.__class__.__name__) and ('prompt' not in p.task_args) and (
@@ -168,7 +170,6 @@ def set_pipeline_args(p, model, prompts:list, negative_prompts:list, prompts_2:t
     else:
         prompt_parser_diffusers.embedder = None
 
-    extra_networks.activate(p, include=['text_encoder', 'text_encoder_2', 'text_encoder_3'])
     if 'prompt' in possible:
         if 'OmniGen' in model.__class__.__name__:
             prompts = [p.replace('|image|', '<img><|image_1|></img>') for p in prompts]
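Why the move matters: set_pipeline_args builds (and may cache) the prompt embeddings before the old activation point, so a generation that itself triggered a LoRA load encoded its prompt with the pre-LoRA text-encoder weights; moving extra_networks.activate for the text encoders ahead of the prompt parsing fixes that, and the second hunk removes the now-redundant late activation. A minimal sketch of the ordering bug, where encode_prompt and activate_lora are hypothetical stand-ins for the prompt embedder and extra_networks.activate, not the repo's actual API:

# Stand-in for the prompt parser/embedder: the result depends on the
# text encoder's current weights, so activation order is observable.
def encode_prompt(text_encoder: dict, prompt: str) -> str:
    return f"embedding({prompt!r}, weights={text_encoder['weights']})"

# Stand-in for extra_networks.activate(p, include=['text_encoder', ...]):
# patches the LoRA deltas into the text encoder in place.
def activate_lora(text_encoder: dict) -> None:
    text_encoder['weights'] = 'base+lora'

# Old order: the prompt is encoded first, then the LoRA is activated,
# so the (cached) embedding never reflects the just-loaded LoRA.
encoder = {'weights': 'base'}
stale = encode_prompt(encoder, 'portrait photo')
activate_lora(encoder)

# New order (this commit): activate the text-encoder LoRAs first, then encode.
encoder = {'weights': 'base'}
activate_lora(encoder)
fresh = encode_prompt(encoder, 'portrait photo')

print(stale)  # embedding('portrait photo', weights='base')       <- LoRA missing
print(fresh)  # embedding('portrait photo', weights='base+lora')  <- LoRA applied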
