We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 1b99ed6 · commit 3c9afd5 (Copy full SHA for 3c9afd5)
modules/models.py
@@ -89,9 +89,9 @@ def load_model(model_name):
89
90
# Quantized model
91
elif shared.args.gptq_bits > 0:
92
- from modules.quant_loader import load_quant
+ from modules.quant_loader import load_quantized
93
94
- model = load_quant(model_name, shared.args.gptq_model_type)
+ model = load_quantized(model_name, shared.args.gptq_model_type)
95
96
# Custom
97
else:
modules/quant_loader.py
@@ -10,7 +10,7 @@
10
11
12
# 4-bit LLaMA
13
-def load_quant(model_name, model_type):
+def load_quantized(model_name, model_type):
14
if model_type == 'llama':
15
from llama import load_quant
16
elif model_type == 'opt':
0 commit comments