Commit 2ea88db

Merge pull request oobabooga#295 from Zerogoki00/opt4-bit

Add support for quantized OPT models

2 parents: d3a8b0d + fa13b56

File tree

4 files changed: +42 -23 lines

README.md

Lines changed: 3 additions & 2 deletions

@@ -140,8 +140,9 @@ Optionally, you can use the following command-line flags:
 | `--cai-chat` | Launch the web UI in chat mode with a style similar to Character.AI's. If the file `img_bot.png` or `img_bot.jpg` exists in the same folder as server.py, this image will be used as the bot's profile picture. Similarly, `img_me.png` or `img_me.jpg` will be used as your profile picture. |
 | `--cpu` | Use the CPU to generate text.|
 | `--load-in-8bit` | Load the model with 8-bit precision.|
-| `--load-in-4bit` | Load the model with 4-bit precision. Currently only works with LLaMA.|
-| `--gptq-bits GPTQ_BITS` | Load a pre-quantized model with specified precision. 2, 3, 4 and 8 (bit) are supported. Currently only works with LLaMA. |
+| `--load-in-4bit` | DEPRECATED: use `--gptq-bits 4` instead. |
+| `--gptq-bits GPTQ_BITS` | Load a pre-quantized model with specified precision. 2, 3, 4 and 8 (bit) are supported. Currently only works with LLaMA and OPT. |
+| `--gptq-model-type MODEL_TYPE` | Model type of pre-quantized model. Currently only LLaMa and OPT are supported. |
 | `--bf16` | Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU. |
 | `--auto-devices` | Automatically split the model across the available GPU(s) and CPU.|
 | `--disk` | If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk. |
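For reference, a pre-quantized OPT checkpoint placed under `models/` would then be loaded with an invocation along these lines (the model name here is illustrative, not part of the commit):

    python server.py --model opt-6.7b --gptq-bits 4 --gptq-model-type opt

If the model folder name begins with `llama` or `opt` (before the first dash), the new loader can also infer the type on its own, so `--gptq-model-type` is only needed for ambiguous names.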

modules/quantized_LLaMA.py renamed to modules/GPTQ_loader.py

Lines changed: 25 additions & 13 deletions

@@ -7,28 +7,40 @@
 import modules.shared as shared
 
 sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
-from llama import load_quant
+import llama
+import opt
 
 
-# 4-bit LLaMA
-def load_quantized_LLaMA(model_name):
-    if shared.args.load_in_4bit:
-        bits = 4
+def load_quantized(model_name):
+    if not shared.args.gptq_model_type:
+        # Try to determine model type from model name
+        model_type = model_name.split('-')[0].lower()
+        if model_type not in ('llama', 'opt'):
+            print("Can't determine model type from model name. Please specify it manually using --gptq-model-type "
+                  "argument")
+            exit()
     else:
-        bits = shared.args.gptq_bits
+        model_type = shared.args.gptq_model_type.lower()
+
+    if model_type == 'llama':
+        load_quant = llama.load_quant
+    elif model_type == 'opt':
+        load_quant = opt.load_quant
+    else:
+        print("Unknown pre-quantized model type specified. Only 'llama' and 'opt' are supported")
+        exit()
 
     path_to_model = Path(f'models/{model_name}')
-    pt_model = ''
     if path_to_model.name.lower().startswith('llama-7b'):
-        pt_model = f'llama-7b-{bits}bit.pt'
+        pt_model = f'llama-7b-{shared.args.gptq_bits}bit.pt'
     elif path_to_model.name.lower().startswith('llama-13b'):
-        pt_model = f'llama-13b-{bits}bit.pt'
+        pt_model = f'llama-13b-{shared.args.gptq_bits}bit.pt'
     elif path_to_model.name.lower().startswith('llama-30b'):
-        pt_model = f'llama-30b-{bits}bit.pt'
+        pt_model = f'llama-30b-{shared.args.gptq_bits}bit.pt'
     elif path_to_model.name.lower().startswith('llama-65b'):
-        pt_model = f'llama-65b-{bits}bit.pt'
+        pt_model = f'llama-65b-{shared.args.gptq_bits}bit.pt'
     else:
-        pt_model = f'{model_name}-{bits}bit.pt'
+        pt_model = f'{model_name}-{shared.args.gptq_bits}bit.pt'
 
     # Try to find the .pt both in models/ and in the subfolder
     pt_path = None
@@ -40,7 +52,7 @@ def load_quantized_LLaMA(model_name):
         print(f"Could not find {pt_model}, exiting...")
         exit()
 
-    model = load_quant(str(path_to_model), str(pt_path), bits)
+    model = load_quant(str(path_to_model), str(pt_path), shared.args.gptq_bits)
 
     # Multiple GPUs or GPU+CPU
     if shared.args.gpu_memory:
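The part of the renamed loader that is easiest to get wrong is the model-type inference, since it only looks at the text before the first dash in the folder name. A minimal, self-contained sketch of that logic (the helper name is hypothetical, not part of the commit):

    # Hypothetical helper illustrating how load_quantized() infers the GPTQ
    # model family from a directory name such as "opt-6.7b" or "llama-13b-hf".
    def infer_gptq_model_type(model_name: str) -> str:
        model_type = model_name.split('-')[0].lower()
        if model_type not in ('llama', 'opt'):
            raise ValueError(
                "Can't determine model type from model name; "
                "pass --gptq-model-type explicitly."
            )
        return model_type

    print(infer_gptq_model_type('opt-6.7b'))      # -> 'opt'
    print(infer_gptq_model_type('llama-13b-hf'))  # -> 'llama'

A name like "my-quantized-opt" would not be recognized, which is exactly the case --gptq-model-type exists for.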

modules/models.py

Lines changed: 6 additions & 6 deletions

@@ -1,6 +1,5 @@
 import json
 import os
-import sys
 import time
 import zipfile
 from pathlib import Path
@@ -35,14 +34,15 @@
     ds_config = generate_ds_config(shared.args.bf16, 1 * world_size, shared.args.nvme_offload_dir)
     dschf = HfDeepSpeedConfig(ds_config) # Keep this object alive for the Transformers integration
 
+
 def load_model(model_name):
     print(f"Loading {model_name}...")
     t0 = time.time()
 
     shared.is_RWKV = model_name.lower().startswith('rwkv-')
 
     # Default settings
-    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.load_in_4bit, shared.args.gptq_bits > 0, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
+    if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
@@ -87,11 +87,11 @@ def load_model(model_name):
 
         return model, tokenizer
 
-    # 4-bit LLaMA
-    elif shared.args.gptq_bits > 0 or shared.args.load_in_4bit:
-        from modules.quantized_LLaMA import load_quantized_LLaMA
+    # Quantized model
+    elif shared.args.gptq_bits > 0:
+        from modules.GPTQ_loader import load_quantized
 
-        model = load_quantized_LLaMA(model_name)
+        model = load_quantized(model_name)
 
     # Custom
     else:
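The practical effect in load_model() is that the quantized branch is now keyed only on shared.args.gptq_bits; the deprecated --load-in-4bit flag is folded into that value before load_model() runs (see modules/shared.py below). A rough, self-contained sketch of the resulting choice, collapsing the unrelated branches (FlexGen, RWKV, DeepSpeed) into the default case and using a stand-in object rather than the real module:

    # Stand-in sketch (not the actual module): a single integer now decides
    # whether the GPTQ loader is used.
    from types import SimpleNamespace

    def pick_loader(args):
        if args.gptq_bits > 0:
            return 'modules.GPTQ_loader.load_quantized'   # pre-quantized LLaMA/OPT
        return 'AutoModelForCausalLM.from_pretrained'     # regular Transformers path

    print(pick_loader(SimpleNamespace(gptq_bits=4)))  # GPTQ loader
    print(pick_loader(SimpleNamespace(gptq_bits=0)))  # default path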

modules/shared.py

Lines changed: 8 additions & 2 deletions

@@ -69,8 +69,9 @@ def str2bool(v):
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
 parser.add_argument('--cpu', action='store_true', help='Use the CPU to generate text.')
 parser.add_argument('--load-in-8bit', action='store_true', help='Load the model with 8-bit precision.')
-parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision. Currently only works with LLaMA.')
-parser.add_argument('--gptq-bits', type=int, default=0, help='Load a pre-quantized model with specified precision. 2, 3, 4 and 8bit are supported. Currently only works with LLaMA.')
+parser.add_argument('--load-in-4bit', action='store_true', help='DEPRECATED: use --gptq-bits 4 instead.')
+parser.add_argument('--gptq-bits', type=int, default=0, help='Load a pre-quantized model with specified precision. 2, 3, 4 and 8bit are supported. Currently only works with LLaMA and OPT.')
+parser.add_argument('--gptq-model-type', type=str, help='Model type of pre-quantized model. Currently only LLaMa and OPT are supported.')
 parser.add_argument('--bf16', action='store_true', help='Load the model with bfloat16 precision. Requires NVIDIA Ampere GPU.')
 parser.add_argument('--auto-devices', action='store_true', help='Automatically split the model across the available GPU(s) and CPU.')
 parser.add_argument('--disk', action='store_true', help='If the model is too large for your GPU(s) and CPU combined, send the remaining layers to the disk.')
@@ -95,3 +96,8 @@ def str2bool(v):
 parser.add_argument('--auto-launch', action='store_true', default=False, help='Open the web UI in the default browser upon launch.')
 parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')
 args = parser.parse_args()
+
+# Provisional, this will be deleted later
+if args.load_in_4bit:
+    print("Warning: --load-in-4bit is deprecated and will be removed. Use --gptq-bits 4 instead.\n")
+    args.gptq_bits = 4
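The shim at the end keeps old launch scripts working: --load-in-4bit still parses, but it is immediately rewritten into --gptq-bits 4 so the rest of the code only has to check one option. A self-contained illustration of the same pattern, with a simplified parser rather than the project's full argument list:

    # Minimal reproduction of the deprecation shim: the old boolean flag is
    # mapped onto the new integer option right after parse_args().
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--load-in-4bit', action='store_true')
    parser.add_argument('--gptq-bits', type=int, default=0)

    args = parser.parse_args(['--load-in-4bit'])
    if args.load_in_4bit:
        print("Warning: --load-in-4bit is deprecated and will be removed. Use --gptq-bits 4 instead.\n")
        args.gptq_bits = 4

    print(args.gptq_bits)  # 4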
