
Commit 4fe78fc

added download information
1 parent b05634f commit 4fe78fc

File tree

1 file changed: +26 −8 lines


src/open_codex/agents/phi_4_mini.py

Lines changed: 26 additions & 8 deletions
@@ -7,21 +7,39 @@
 from huggingface_hub import hf_hub_download  # type: ignore
 
 class AgentPhi4Mini(LLMAgent):
-    def __init__(self, system_prompt: str):
-        model_filename = "Phi-4-mini-instruct-Q3_K_L.gguf"
-        repo_id = "lmstudio-community/Phi-4-mini-instruct-GGUF"
-
-        print("\n⏬ Downloading model from Hugging Face...")
+    def download_model(self, model_filename: str,
+                       repo_id: str,
+                       local_dir: str) -> str:
+        print(
+            "\n🤖 Thank you for using Open Codex!\n"
+            "📦 For the first run, we need to download the model from Hugging Face.\n"
+            "⏬ This only happens once – it’ll be cached locally for future use.\n"
+            "🔄 Sit tight, the download will begin now...\n"
+        )
+        print("\n⏬ Downloading model phi4-mini ...")
+
         start = time.time()
         model_path:str = hf_hub_download(
             repo_id=repo_id,
             filename=model_filename,
-            local_dir=os.path.expanduser("~/.cache/open-codex"),
-            local_dir_use_symlinks=False,
-            resume_download=True
+            local_dir=local_dir,
         )
         end = time.time()
         print(f"✅ Model downloaded in {end - start:.2f}s\n")
+        return model_path
+
+    def __init__(self, system_prompt: str):
+        model_filename = "Phi-4-mini-instruct-Q3_K_L.gguf"
+        repo_id = "lmstudio-community/Phi-4-mini-instruct-GGUF"
+        local_dir = os.path.expanduser("~/.cache/open-codex")
+        model_path = os.path.join(local_dir, model_filename)
+
+        # check if the model is already downloaded
+        if not os.path.exists(model_path):
+            # download the model
+            model_path = self.download_model(model_filename, repo_id, local_dir)
+        else:
+            print(f"We are locking and loading the model for you...\n")
 
         # suppress the stderr output from llama_cpp
         # this is a workaround for the llama_cpp library
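
For context, the change above turns an unconditional download into a check-then-download pattern: the GGUF file lands in ~/.cache/open-codex, and later runs skip the network entirely. A minimal standalone sketch of the same pattern follows; the ensure_model helper name is hypothetical, while the repo ID, filename, and cache path are taken from the diff:

import os
import time

from huggingface_hub import hf_hub_download

MODEL_FILENAME = "Phi-4-mini-instruct-Q3_K_L.gguf"
REPO_ID = "lmstudio-community/Phi-4-mini-instruct-GGUF"
LOCAL_DIR = os.path.expanduser("~/.cache/open-codex")

def ensure_model() -> str:
    """Return the local path to the model, downloading it only on first use."""
    model_path = os.path.join(LOCAL_DIR, MODEL_FILENAME)
    if os.path.exists(model_path):
        # cache hit: reuse the file from a previous run, no network call
        return model_path
    start = time.time()
    # hf_hub_download fetches the file into local_dir and returns its path
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=MODEL_FILENAME,
        local_dir=LOCAL_DIR,
    )
    print(f"Model downloaded in {time.time() - start:.2f}s")
    return model_path

Dropping local_dir_use_symlinks and resume_download also tracks recent huggingface_hub releases, where both arguments are deprecated (downloads resume automatically, and local_dir no longer relies on symlinks), which is presumably why they were removed here.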
