 from huggingface_hub import hf_hub_download  # type: ignore


 class AgentPhi4Mini(LLMAgent):
-    def __init__(self, system_prompt: str):
-        model_filename = "Phi-4-mini-instruct-Q3_K_L.gguf"
-        repo_id = "lmstudio-community/Phi-4-mini-instruct-GGUF"
-
-        print("\n⏬ Downloading model from Hugging Face...")
+    def download_model(self, model_filename: str,
+                       repo_id: str,
+                       local_dir: str) -> str:
+        print(
+            "\n🤖 Thank you for using Open Codex!\n"
+            "📦 For the first run, we need to download the model from Hugging Face.\n"
+            "⏬ This only happens once – it’ll be cached locally for future use.\n"
+            "🔄 Sit tight, the download will begin now...\n"
+        )
+        print("\n⏬ Downloading model phi4-mini ...")
+
         start = time.time()
         model_path:str = hf_hub_download(
             repo_id=repo_id,
             filename=model_filename,
-            local_dir=os.path.expanduser("~/.cache/open-codex"),
-            local_dir_use_symlinks=False,
-            resume_download=True
+            local_dir=local_dir,
         )
         end = time.time()
         print(f"✅ Model downloaded in {end - start:.2f}s\n")
+        return model_path
+
+    def __init__(self, system_prompt: str):
+        model_filename = "Phi-4-mini-instruct-Q3_K_L.gguf"
+        repo_id = "lmstudio-community/Phi-4-mini-instruct-GGUF"
+        local_dir = os.path.expanduser("~/.cache/open-codex")
+        model_path = os.path.join(local_dir, model_filename)
+
+        # check if the model is already downloaded
+        if not os.path.exists(model_path):
+            # download the model
+            model_path = self.download_model(model_filename, repo_id, local_dir)
+        else:
+            print(f"We are locking and loading the model for you...\n")

         # suppress the stderr output from llama_cpp
         # this is a workaround for the llama_cpp library
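
For reference, below is a minimal standalone sketch of the cache-then-download flow this change introduces. The ensure_model helper is hypothetical (it stands in for the download_model method plus the existence check in __init__), and it assumes that hf_hub_download, when called with local_dir, stores the file under that directory and returns its path, which is what the os.path.exists check relies on:

import os
import time

from huggingface_hub import hf_hub_download  # type: ignore


def ensure_model(model_filename: str, repo_id: str, local_dir: str) -> str:
    """Return the local path to the GGUF file, downloading it only on the first run."""
    model_path = os.path.join(local_dir, model_filename)
    if os.path.exists(model_path):
        # Cache hit: the file was downloaded on a previous run, so reuse it.
        return model_path

    start = time.time()
    # With local_dir set, hf_hub_download places the file under local_dir
    # and returns its full path on disk.
    model_path = hf_hub_download(
        repo_id=repo_id,
        filename=model_filename,
        local_dir=local_dir,
    )
    print(f"✅ Model downloaded in {time.time() - start:.2f}s")
    return model_path


if __name__ == "__main__":
    print(ensure_model(
        "Phi-4-mini-instruct-Q3_K_L.gguf",
        "lmstudio-community/Phi-4-mini-instruct-GGUF",
        os.path.expanduser("~/.cache/open-codex"),
    ))

The skip-on-second-run behavior only works because the path built with os.path.join(local_dir, model_filename) matches the local_dir passed to hf_hub_download; if those ever diverge, the model would be re-downloaded on every start.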