ollama_example.rs

// Import required modules from the LLM library
use llm::{
    builder::{LLMBackend, LLMBuilder},
    chat::ChatMessage,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Get the Ollama server URL from the environment, falling back to the default local port
    let base_url =
        std::env::var("OLLAMA_URL").unwrap_or_else(|_| "http://127.0.0.1:11434".into());

    // Initialize and configure the LLM client
    let llm = LLMBuilder::new()
        .backend(LLMBackend::Ollama) // Use Ollama as the LLM backend
        .base_url(base_url) // Set the Ollama server URL
        .model("llama3.2:latest") // Select which locally pulled model to use
        .max_tokens(1000) // Set maximum response length
        .temperature(0.7) // Control response randomness (0.0-1.0)
        .build()
        .expect("Failed to build LLM (Ollama)");

    // Prepare conversation history with example messages
    let messages = vec![
        ChatMessage::user()
            .content("Hello, how do I run a local LLM in Rust?")
            .build(),
        ChatMessage::assistant()
            .content("One way is to use Ollama with a local model!")
            .build(),
        ChatMessage::user()
            .content("Tell me more about that")
            .build(),
    ];

    // Send the chat request and handle the response
    match llm.chat(&messages).await {
        Ok(text) => println!("Ollama chat response:\n{text}"),
        Err(e) => eprintln!("Chat error: {e}"),
    }

    Ok(())
}
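
// ---------------------------------------------------------------------------
// Running the example — a sketch, assuming this file lives under examples/ in
// a Cargo project that depends on the llm crate (with Tokio available) and
// that a local Ollama server is listening on the default port. The example
// name below is inferred from this filename; adjust it to your setup:
//
//     ollama pull llama3.2:latest
//     OLLAMA_URL=http://127.0.0.1:11434 cargo run --example ollama_example
//
// OLLAMA_URL can be omitted entirely; the code falls back to the default
// http://127.0.0.1:11434 shown above.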