Commit 4c13067

Merge pull request oobabooga#377 from askmyteapot/Fix-Multi-gpu-GPTQ-Llama-no-tokens
Update GPTQ_Loader.py
2 parents: ee164d1 + 53b6a66

1 file changed: modules/GPTQ_loader.py (+1, -1)
@@ -61,7 +61,7 @@ def load_quantized(model_name):
             max_memory[i] = f"{shared.args.gpu_memory[i]}GiB"
         max_memory['cpu'] = f"{shared.args.cpu_memory or '99'}GiB"
 
-        device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LLaMADecoderLayer"])
+        device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
         model = accelerate.dispatch_model(model, device_map=device_map)
 
     # Single GPU
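The one-line fix updates the string passed to `no_split_module_classes` to match the decoder block's current class name in transformers, `LlamaDecoderLayer` (the library renamed its LLaMA classes from the earlier `LLaMA*` spelling). Since `accelerate.infer_auto_device_map` matches these names against module class names, a stale spelling matches nothing, so a decoder layer can be split across devices, which is consistent with the multi-GPU "no tokens" failure this PR addresses. Below is a minimal sketch of this dispatch path; the model path and memory limits are illustrative assumptions, not values from the commit.

```python
# A minimal sketch of the multi-GPU device-map path touched by this commit.
# Assumptions (not from the source): the local model path, the memory
# limits, and building the model on the meta device via init_empty_weights.
import accelerate
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("models/llama-13b")  # hypothetical path

# Instantiate the model without allocating weights so the device map
# can be inferred cheaply.
with accelerate.init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Mirrors how the loader builds max_memory from --gpu-memory/--cpu-memory.
max_memory = {0: "10GiB", 1: "10GiB", "cpu": "99GiB"}

# The string must match the current transformers class name exactly.
# With the stale "LLaMADecoderLayer" spelling nothing matches, so a
# decoder layer can end up split across devices.
device_map = accelerate.infer_auto_device_map(
    model,
    max_memory=max_memory,
    no_split_module_classes=["LlamaDecoderLayer"],
)
```

Passing the class by name rather than by reference spares the loader a direct import of the transformers class, but it also fails silently when transformers renames the class, which is exactly what happened here.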
