Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -69,3 +69,4 @@ src/client/spring_ai/target/classes/*
api_server_key
.env

optimizer_settings.json
2 changes: 1 addition & 1 deletion docs/content/advanced/springai.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ export OPENAI_EMBEDDING_MODEL=text-embedding-3-small
export OLLAMA_CHAT_MODEL="llama3.1"
export OLLAMA_EMBEDDING_MODEL=mxbai-embed-large
export OLLAMA_BASE_URL="http://<OLLAMA_SERVER>:11434"
export CONTEXT_INSTR=" You are an assistant for question-answering tasks. Use the retrieved Documents and history to answer the question as accurately and comprehensively as possible. Keep your answer grounded in the facts of the Documents, be concise, and reference the Documents where possible. If you don't know the answer, just say that you are sorry as you don't haven't enough information. "
export SYS_INSTR=" You are an assistant for question-answering tasks. Use the retrieved Documents and history to answer the question as accurately and comprehensively as possible. Keep your answer grounded in the facts of the Documents, be concise, and reference the Documents where possible. If you don't know the answer, just say that you are sorry as you don't have enough information. "
export TOP_K=4
export VECTOR_STORE=TEXT_EMBEDDING_3_SMALL_8191_1639_COSINE
export PROVIDER=openai
Expand Down
13 changes: 7 additions & 6 deletions src/client/content/config/tabs/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,31 +172,32 @@ def spring_ai_conf_check(ll_model: dict, embed_model: dict) -> str:

def spring_ai_obaas(src_dir, file_name, provider, ll_config, embed_config):
"""Get the users CTX Prompt"""
ctx_prompt = next(

sys_prompt = next(
item["prompt"]
for item in state.prompt_configs
if item["name"] == state.client_settings["prompts"]["ctx"] and item["category"] == "ctx"
if item["name"] == state.client_settings["prompts"]["sys"] and item["category"] == "sys"
)

logger.info(f"Prompt used in export:\n{sys_prompt}")
with open(src_dir / "templates" / file_name, "r", encoding="utf-8") as template:
template_content = template.read()

database_lookup = st_common.state_configs_lookup("database_configs", "name")

formatted_content = template_content.format(
provider=provider,
ctx_prompt=f"{ctx_prompt}",
sys_prompt=f"{sys_prompt}",
ll_model=ll_config,
vector_search=embed_config,
database_config=database_lookup[state.client_settings["database"]["alias"]],
)

if file_name.endswith(".yaml"):
ctx_prompt = json.dumps(ctx_prompt, indent=True) # Converts it into a valid JSON string (preserving quotes)
sys_prompt = json.dumps(sys_prompt, indent=True) # Converts it into a valid JSON string (preserving quotes)

formatted_content = template_content.format(
provider=provider,
ctx_prompt=ctx_prompt,
sys_prompt=sys_prompt,
ll_model=ll_config,
vector_search=embed_config,
database_config=database_lookup[state.client_settings["database"]["alias"]],
Expand Down
2 changes: 1 addition & 1 deletion src/client/spring_ai/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ export OPENAI_EMBEDDING_MODEL=text-embedding-3-small
export OLLAMA_CHAT_MODEL="llama3.1"
export OLLAMA_EMBEDDING_MODEL=mxbai-embed-large
export OLLAMA_BASE_URL="http://<OLLAMA_SERVER>:11434"
export CONTEXT_INSTR=" You are an assistant for question-answering tasks. Use the retrieved Documents and history to answer the question as accurately and comprehensively as possible. Keep your answer grounded in the facts of the Documents, be concise, and reference the Documents where possible. If you don't know the answer, just say that you are sorry as you don't haven't enough information. "
export SYS_INSTR=" You are an assistant for question-answering tasks. Use the retrieved Documents and history to answer the question as accurately and comprehensively as possible. Keep your answer grounded in the facts of the Documents, be concise, and reference the Documents where possible. If you don't know the answer, just say that you are sorry as you don't have enough information. "
export TOP_K=4
export VECTOR_STORE=TEXT_EMBEDDING_3_SMALL_8191_1639_COSINE
export PROVIDER=openai
Expand Down
2 changes: 1 addition & 1 deletion src/client/spring_ai/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>3.4.5</version>
<version>3.4.9</version>
<relativePath /> <!-- lookup parent from repository -->
</parent>
<groupId>com.example</groupId>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ public String legacyTable(@Value("${aims.vectortable.name}") String table) {
}

@Bean
public String contextInstr(@Value("${aims.context_instr}") String instr) {
public String contextInstr(@Value("${aims.sys_instr}") String instr) {
return instr;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,14 +37,15 @@ public Prompt buildPrompt(String message, String contextInstr, int topK) {

INSTRUCTIONS:""";

// A standard RAG instruction, provided as a convenient fallback for the contextInstr coming from the Oracle AI Optimizer export
String defaultInstr = """
Answer the users question using the DOCUMENTS text above.
Keep your answer ground in the facts of the DOCUMENTS.
If the DOCUMENTS doesn’t contain the facts to answer the QUESTION, return:
I'm sorry but I haven't enough information to answer.
""";

template += "\n" + defaultInstr;
template += "\n" + contextInstr;

List<Document> similarDocuments = vectorStore.similaritySearch(
SearchRequest.builder().query(message).topK(topK).build());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ spring:
options:
model: ${OLLAMA_EMBEDDING_MODEL}
aims:
context_instr: ${CONTEXT_INSTR}
sys_instr: ${SYS_INSTR}
vectortable:
name: ${VECTOR_STORE}
rag_params:
Expand Down
2 changes: 1 addition & 1 deletion src/client/spring_ai/templates/obaas.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ spring:
options:
model: \"{vector_search[id]}\"
aims:
context_instr: \"{ctx_prompt}\"
sys_instr: \"{sys_prompt}\"
vectortable:
name: {vector_search[vector_store]}
rag_params:
Expand Down
2 changes: 1 addition & 1 deletion src/client/spring_ai/templates/start.sh
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ export DB_USERNAME="{database_config[user]}"
export DB_PASSWORD="{database_config[password]}"
export DISTANCE_TYPE="{vector_search[distance_metric]}"
export INDEX_TYPE="{vector_search[index_type]}"
export CONTEXT_INSTR="{ctx_prompt}"
export SYS_INSTR="{sys_prompt}"
export TOP_K="{vector_search[top_k]}"

export VECTOR_STORE="{vector_search[vector_store]}"
Expand Down
2 changes: 1 addition & 1 deletion src/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ server = [
"langchain-perplexity==0.1.2",
"langchain-xai==0.2.5",
"langgraph==0.6.6",
"litellm==1.76.1",
"litellm==1.77.1",
"llama-index==0.13.3",
"lxml==6.0.0",
"matplotlib==3.10.6",
Expand Down