
Commit bcd8afd

Merge pull request #393 from WojtekKowaluk/mps_support
Fix for MPS support on Apple Silicon
2 parents dc35861 + e26763a

File tree: 3 files changed, +12 −2 lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -9,6 +9,8 @@ torch-dumps/*
 *pycache*
 */*pycache*
 */*/pycache*
+venv/
+.venv/
 
 settings.json
 img_bot*

modules/models.py

Lines changed: 7 additions & 2 deletions
@@ -47,7 +47,12 @@ def load_model(model_name):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
         else:
-            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
+            model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16)
+            if torch.has_mps:
+                device = torch.device('mps')
+                model = model.to(device)
+            else:
+                model = model.cuda()
 
     # FlexGen
     elif shared.args.flexgen:

@@ -97,7 +102,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not any((shared.args.cpu, torch.cuda.is_available(), torch.has_mps)):
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
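
The device-selection pattern this hunk introduces (load the model first, then move it to whichever accelerator is present) can be factored into a small helper. A minimal sketch, assuming PyTorch 1.12 or newer; the pick_device name is hypothetical and not part of this PR, and torch.backends.mps.is_available() is the documented check for which torch.has_mps (used above) is an older shorthand:

import torch

def pick_device(force_cpu=False):
    # Prefer CUDA, then Apple's Metal (MPS) backend, then plain CPU.
    if force_cpu:
        return torch.device('cpu')
    if torch.cuda.is_available():
        return torch.device('cuda')
    mps = getattr(torch.backends, 'mps', None)  # attribute absent before PyTorch 1.12
    if mps is not None and mps.is_available():
        return torch.device('mps')
    return torch.device('cpu')

With such a helper, the branching above collapses to model = model.to(pick_device(shared.args.cpu)).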

modules/text_generation.py

Lines changed: 3 additions & 0 deletions
@@ -33,6 +33,9 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()
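
As a standalone illustration of the encode-side change, the sketch below tokenizes a prompt and moves the resulting tensor to the MPS device when one is present. The checkpoint name is a stand-in for illustration, not something this PR touches:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # illustrative checkpoint only
device = torch.device('mps') if getattr(torch, 'has_mps', False) else torch.device('cpu')
input_ids = tokenizer.encode('Hello there', return_tensors='pt').to(device)
print(input_ids.device)  # mps:0 on Apple Silicon, cpu elsewhere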
