Commit 94d1940

v0.8.2 release

- Vision
- Code execution sessions
- Chat names

2 parents 88f2778 + 2597842, commit 94d1940

41 files changed: +716, -210 lines

README.md

Lines changed: 1 addition & 2 deletions

@@ -4,9 +4,8 @@

 # `Agent Zero`

-[![Thanks to Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Thanks%20to%20Sponsors-FF69B4?style=for-the-badge&logo=githubsponsors&logoColor=white)](https://github.com/sponsors/frdel) [![Join our Skool Community](https://img.shields.io/badge/Skool-Join%20our%20Community-4A90E2?style=for-the-badge&logo=skool&logoColor=white)](https://www.skool.com/agent-zero) [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/B8KZKNsPpj) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@AgentZeroFW) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/jan-tomasek/) [![Follow on Warpcast](https://img.shields.io/badge/Warpcast-Follow-5A32F3?style=for-the-badge)](https://warpcast.com/agent-zero)
+[![Thanks to Sponsors](https://img.shields.io/badge/GitHub%20Sponsors-Thanks%20to%20Sponsors-FF69B4?style=for-the-badge&logo=githubsponsors&logoColor=white)](https://github.com/sponsors/frdel) [![Follow on X](https://img.shields.io/badge/X-Follow-000000?style=for-the-badge&logo=x&logoColor=white)](https://x.com/Agent0_ai) [![Join our Skool Community](https://img.shields.io/badge/Skool-Join%20our%20Community-4A90E2?style=for-the-badge&logo=skool&logoColor=white)](https://www.skool.com/agent-zero) [![Join our Discord](https://img.shields.io/badge/Discord-Join%20our%20server-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/B8KZKNsPpj) [![Subscribe on YouTube](https://img.shields.io/badge/YouTube-Subscribe-red?style=for-the-badge&logo=youtube&logoColor=white)](https://www.youtube.com/@AgentZeroFW) [![Connect on LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/in/jan-tomasek/) [![Follow on Warpcast](https://img.shields.io/badge/Warpcast-Follow-5A32F3?style=for-the-badge)](https://warpcast.com/agent-zero)

-> **Note:** Agent Zero does not use Twitter/X. Any Twitter/X accounts claiming to represent this project are fake.

 [Installation](./docs/installation.md)
 [How to update](./docs/installation.md#how-to-update-agent-zero)

agent.py

Lines changed: 47 additions & 30 deletions

@@ -10,15 +10,22 @@
 from langchain_core.prompt_values import ChatPromptValue
 from python.helpers import extract_tools, rate_limiter, files, errors, history, tokens
 from python.helpers.print_style import PrintStyle
-from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
+from langchain_core.prompts import (
+    ChatPromptTemplate,
+    MessagesPlaceholder,
+    HumanMessagePromptTemplate,
+    StringPromptTemplate,
+)
+from langchain_core.prompts.image import ImagePromptTemplate
+from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, BaseMessage
 from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.language_models.llms import BaseLLM
 from langchain_core.embeddings import Embeddings
 import python.helpers.log as Log
 from python.helpers.dirty_json import DirtyJson
 from python.helpers.defer import DeferredTask
 from typing import Callable
+from python.helpers.history import OutputMessage


 class AgentContext:
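The new image and human-message prompt imports above are what the release's vision support builds on. As a rough, illustrative sketch of what they enable (generic LangChain usage, not the exact code path in agent.py; the helper name and file path are assumptions), a chat model can receive an image as a structured content part alongside text:

```python
import base64
from langchain_core.messages import HumanMessage

def image_message(text: str, image_path: str) -> HumanMessage:
    # Encode a local image as a base64 data URL and attach it next to the text part.
    with open(image_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    return HumanMessage(
        content=[
            {"type": "text", "text": text},
            {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{b64}"}},
        ]
    )

# msg = image_message("What is in this screenshot?", "screenshot.png")
# reply = chat_model.invoke([msg])  # any vision-capable LangChain chat model
```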
@@ -89,7 +96,7 @@ def nudge(self):
         else:
             current_agent = self.agent0

-        self.task =self.run_task(current_agent.monologue)
+        self.task = self.run_task(current_agent.monologue)
         return self.task

     def communicate(self, msg: "UserMessage", broadcast_level: int = 1):
@@ -128,9 +135,9 @@ def run_task(
     async def _process_chain(self, agent: "Agent", msg: "UserMessage|str", user=True):
         try:
            msg_template = (
-                await agent.hist_add_user_message(msg)  # type: ignore
+                agent.hist_add_user_message(msg)  # type: ignore
                 if user
-                else await agent.hist_add_tool_result(
+                else agent.hist_add_tool_result(
                     tool_name="call_subordinate", tool_result=msg  # type: ignore
                 )
             )
@@ -187,7 +194,7 @@ class AgentConfig:
 @dataclass
 class UserMessage:
     message: str
-    attachments: list[str]
+    attachments: list[str] = field(default_factory=list[str])


 class LoopData:
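Giving `attachments` a `default_factory` lets callers construct a `UserMessage` from text alone, and every instance gets its own fresh list instead of a shared mutable default. A minimal sketch of the behavior (the sample instances are illustrative, not from the repo):

```python
from dataclasses import dataclass, field

@dataclass
class UserMessage:
    message: str
    attachments: list[str] = field(default_factory=list)  # fresh list per instance

a = UserMessage("first")                      # constructing without attachments now works
b = UserMessage("second", ["/tmp/shot.png"])  # attachments still accepted explicitly

a.attachments.append("note.txt")
assert b.attachments == ["/tmp/shot.png"]     # instances do not share the default list
```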
@@ -281,9 +288,6 @@ async def stream_callback(chunk: str, full: str):
                printer.stream(chunk)
                self.log_from_stream(full, log)

-            # store as last context window content
-            self.set_data(Agent.DATA_NAME_CTX_WINDOW, prompt.format())
-
            agent_response = await self.call_chat_model(
                prompt, callback=stream_callback
            )
@@ -294,18 +298,18 @@ async def stream_callback(chunk: str, full: str):
                self.loop_data.last_response == agent_response
            ):  # if assistant_response is the same as last message in history, let him know
                # Append the assistant's response to the history
-                await self.hist_add_ai_response(agent_response)
+                self.hist_add_ai_response(agent_response)
                # Append warning message to the history
                warning_msg = self.read_prompt("fw.msg_repeat.md")
-                await self.hist_add_warning(message=warning_msg)
+                self.hist_add_warning(message=warning_msg)
                PrintStyle(font_color="orange", padding=True).print(
                    warning_msg
                )
                self.context.log.log(type="warning", content=warning_msg)

            else:  # otherwise proceed with tool
                # Append the assistant's response to the history
-                await self.hist_add_ai_response(agent_response)
+                self.hist_add_ai_response(agent_response)
                # process tools requested in agent message
                tools_result = await self.process_tools(agent_response)
                if tools_result:  # final response of message loop available
@@ -317,7 +321,7 @@ async def stream_callback(chunk: str, full: str):
        except RepairableException as e:
            # Forward repairable errors to the LLM, maybe it can fix them
            error_message = errors.format_error(e)
-            await self.hist_add_warning(error_message)
+            self.hist_add_warning(error_message)
            PrintStyle(font_color="red", padding=True).print(error_message)
            self.context.log.log(type="error", content=error_message)
        except Exception as e:
@@ -356,19 +360,32 @@ async def prepare_prompt(self, loop_data: LoopData) -> ChatPromptTemplate:
            extras += history.Message(False, content=extra).output()
        loop_data.extras_temporary.clear()

-        # combine history and extras
-        history_combined = history.group_outputs_abab(loop_data.history_output + extras)
-
-        # convert history to LLM format
-        history_langchain = history.output_langchain(history_combined)
+        # convert history + extras to LLM format
+        history_langchain: list[BaseMessage] = history.output_langchain(
+            loop_data.history_output + extras
+        )

        # build chain from system prompt, message history and model
+        system_text = "\n\n".join(loop_data.system)
        prompt = ChatPromptTemplate.from_messages(
            [
-                SystemMessage(content="\n\n".join(loop_data.system)),
+                SystemMessage(content=system_text),
                *history_langchain,
+                AIMessage(content="JSON:"),  # force the LLM to start with json
            ]
        )
+
+        # store as last context window content
+        self.set_data(
+            Agent.DATA_NAME_CTX_WINDOW,
+            {
+                "text": prompt.format(),
+                "tokens": self.history.get_tokens()
+                + tokens.approximate_tokens(system_text)
+                + tokens.approximate_tokens(history.output_text(extras)),
+            },
+        )
+
        return prompt

    def handle_critical_exception(self, exception: Exception):
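Ending the prompt with a partial assistant turn ("JSON:") is a common priming trick: the model sees its own reply already started and tends to continue it, which nudges every response to begin as JSON. A minimal, self-contained sketch of the idea (generic LangChain usage with an illustrative system instruction, not the repo's actual prompt files):

```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage

prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessage(content="Reply only with a JSON object containing a 'thoughts' key."),
        HumanMessage(content="Summarize the plan."),
        AIMessage(content="JSON:"),  # primes the model to continue the assistant turn as JSON
    ]
)

messages = prompt.format_messages()
# reply = chat_model.invoke(messages)  # any LangChain chat model; the reply continues after "JSON:"
```

The same hunk also moves the context-window snapshot after prompt assembly and stores it as a dict with both the rendered text and an approximate token total, so the UI can report size without re-tokenizing.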
@@ -435,12 +452,12 @@ def get_data(self, field: str):
    def set_data(self, field: str, value):
        self.data[field] = value

-    def hist_add_message(self, ai: bool, content: history.MessageContent):
-        return self.history.add_message(ai=ai, content=content)
-
-    async def hist_add_user_message(
-        self, message: UserMessage, intervention: bool = False
+    def hist_add_message(
+        self, ai: bool, content: history.MessageContent, tokens: int = 0
    ):
+        return self.history.add_message(ai=ai, content=content, tokens=tokens)
+
+    def hist_add_user_message(self, message: UserMessage, intervention: bool = False):
        self.history.new_topic()  # user message starts a new topic in history

        # load message template based on intervention
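`hist_add_message` now forwards an optional precomputed token count to the underlying history, which pairs with the token bookkeeping added in `prepare_prompt`. A rough stand-in showing why passing the count at insertion time is cheaper than re-counting the whole history later; the classes and the characters-per-token heuristic below are illustrative assumptions, not the repo's `python.helpers.history`:

```python
from dataclasses import dataclass, field

def approximate_tokens(text: str) -> int:
    # crude stand-in heuristic: roughly 4 characters per token
    return max(1, len(text) // 4)

@dataclass
class Message:
    ai: bool
    content: str
    tokens: int = 0

@dataclass
class History:
    messages: list[Message] = field(default_factory=list)

    def add_message(self, ai: bool, content: str, tokens: int = 0) -> Message:
        # use the caller's count when provided, otherwise estimate once on insert
        msg = Message(ai=ai, content=content, tokens=tokens or approximate_tokens(content))
        self.messages.append(msg)
        return msg

    def get_tokens(self) -> int:
        # the total is a cheap sum of cached counts, no re-tokenizing
        return sum(m.tokens for m in self.messages)

h = History()
h.add_message(ai=False, content="user text", tokens=3)  # count supplied by the caller
h.add_message(ai=True, content="assistant reply")       # estimated on insert
print(h.get_tokens())
```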
@@ -470,16 +487,16 @@ async def hist_add_user_message(
        self.last_user_message = msg
        return msg

-    async def hist_add_ai_response(self, message: str):
+    def hist_add_ai_response(self, message: str):
        self.loop_data.last_response = message
        content = self.parse_prompt("fw.ai_response.md", message=message)
        return self.hist_add_message(True, content=content)

-    async def hist_add_warning(self, message: history.MessageContent):
+    def hist_add_warning(self, message: history.MessageContent):
        content = self.parse_prompt("fw.warning.md", message=message)
        return self.hist_add_message(False, content=content)

-    async def hist_add_tool_result(self, tool_name: str, tool_result: str):
+    def hist_add_tool_result(self, tool_name: str, tool_result: str):
        content = self.parse_prompt(
            "fw.tool_result.md", tool_name=tool_name, tool_result=tool_result
        )
@@ -613,9 +630,9 @@ async def handle_intervention(self, progress: str = ""):
613630
msg = self.intervention
614631
self.intervention = None # reset the intervention message
615632
if progress.strip():
616-
await self.hist_add_ai_response(progress)
633+
self.hist_add_ai_response(progress)
617634
# append the intervention message
618-
await self.hist_add_user_message(msg, intervention=True)
635+
self.hist_add_user_message(msg, intervention=True)
619636
raise InterventionException(msg)
620637

621638
async def wait_if_paused(self):
@@ -642,7 +659,7 @@ async def process_tools(self, msg: str):
            return response.message
        else:
            msg = self.read_prompt("fw.msg_misformat.md")
-            await self.hist_add_warning(msg)
+            self.hist_add_warning(msg)
            PrintStyle(font_color="red", padding=True).print(msg)
            self.context.log.log(
                type="error", content=f"{self.agent_name}: Message misformat"

docker/run/DockerfileKali

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+# Use the latest slim version of Kali Linux
+FROM kalilinux/kali-rolling
+
+# Check if the argument is provided, else throw an error
+ARG BRANCH
+RUN if [ -z "$BRANCH" ]; then echo "ERROR: BRANCH is not set!" >&2; exit 1; fi
+ENV BRANCH=$BRANCH
+
+# Copy contents of the project to /a0
+COPY ./fs/ /
+
+# pre installation steps
+RUN bash /ins/pre_install.sh $BRANCH
+RUN bash /ins/pre_install_kali.sh $BRANCH
+
+# install additional software
+RUN bash /ins/install_additional.sh $BRANCH
+
+# install A0
+RUN bash /ins/install_A0.sh $BRANCH
+
+# cleanup repo and install A0 without caching, this speeds up builds
+ARG CACHE_DATE=none
+RUN echo "cache buster $CACHE_DATE" && bash /ins/install_A02.sh $BRANCH
+
+# post installation steps
+RUN bash /ins/post_install.sh $BRANCH
+
+# Expose ports
+EXPOSE 22 80
+
+# initialize runtime
+CMD ["/bin/bash", "-c", "/bin/bash /exe/initialize.sh $BRANCH"]

docker/run/build.txt

Lines changed: 3 additions & 0 deletions

@@ -4,6 +4,9 @@ docker build -t agent-zero-run:local --build-arg BRANCH=development --build-arg
 # local image without cache
 docker build -t agent-zero-run:local --build-arg BRANCH=development --no-cache .

+# local image from Kali
+docker build -f ./DockerfileKali -t agent-zero-run:hacking --build-arg BRANCH=main --build-arg CACHE_DATE=$(date +%Y-%m-%d:%H:%M:%S) .
+
 # dockerhub push:

 docker login

Lines changed: 3 additions & 0 deletions

@@ -1,4 +1,7 @@
 #!/bin/bash

+# install playwright
+bash /ins/install_playwright.sh "$@"
+
 # searxng
 bash /ins/install_searxng.sh "$@"

docker/run/fs/ins/install_playwright.sh

Lines changed: 9 additions & 1 deletion

@@ -7,4 +7,12 @@
 pip install playwright

 # install chromium with dependencies
-playwright install --with-deps chromium-headless-shell
+# for kali-based
+if [ "$@" = "hacking" ]; then
+    apt-get install -y fonts-unifont libnss3 libnspr4
+    playwright install chromium-headless-shell
+else
+    # for debian based
+    playwright install --with-deps chromium-headless-shell
+fi
+
docker/run/fs/ins/post_install.sh

Lines changed: 0 additions & 3 deletions

@@ -1,8 +1,5 @@
 #!/bin/bash

-# install playwright
-bash /ins/install_playwright.sh "$@"
-
 # Cleanup package list
 rm -rf /var/lib/apt/lists/*
 apt-get clean

docker/run/fs/ins/pre_install.sh

Lines changed: 15 additions & 3 deletions

@@ -3,7 +3,6 @@
 # Update and install necessary packages
 apt-get update && apt-get install -y \
     python3 \
-    python3-pip \
     python3-venv \
     nodejs \
     npm \
@@ -14,5 +13,18 @@ apt-get update && apt-get install -y \
     git \
     ffmpeg

-# prepare SSH daemon
-bash /ins/setup_ssh.sh "$@"
+# # Configure system alternatives so that /usr/bin/python3 points to Python 3.12
+# sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.12 1
+# sudo update-alternatives --set python3 /usr/bin/python3.12
+
+# Update pip3 symlink: if pip3.12 exists, point pip3 to it;
+# otherwise, install pip using Python 3.12's ensurepip.
+# if [ -f /usr/bin/pip3.12 ]; then
+#     sudo ln -sf /usr/bin/pip3.12 /usr/bin/pip3
+# else
+python3 -m ensurepip --upgrade
+python3 -m pip install --upgrade pip
+# fi
+
+# Prepare SSH daemon
+bash /ins/setup_ssh.sh "$@"

docker/run/fs/ins/pre_install_kali.sh

Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
+#!/bin/bash
+
+# ubuntu based dependencies for playwright
+# moved to install_playwright.sh
+# apt-get update && apt-get install -y fonts-unifont fonts-ubuntu
+

docker/run/fs/ins/setup_ssh.sh

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 #!/bin/bash

 # Set up SSH
-mkdir /var/run/sshd && \
+mkdir -p /var/run/sshd && \
 # echo 'root:toor' | chpasswd && \
 sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config

initialize.py

Lines changed: 1 addition & 0 deletions

@@ -13,6 +13,7 @@ def initialize():
         provider=models.ModelProvider[current_settings["chat_model_provider"]],
         name=current_settings["chat_model_name"],
         ctx_length=current_settings["chat_model_ctx_length"],
+        vision=current_settings["chat_model_vision"],
         limit_requests=current_settings["chat_model_rl_requests"],
         limit_input=current_settings["chat_model_rl_input"],
         limit_output=current_settings["chat_model_rl_output"],
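The chat-model config now carries a `vision` flag taken from settings. As a hedged sketch of how such a flag is typically consumed downstream (the helper below is a hypothetical illustration, not code from the repo), image attachments would simply be dropped for models that are not marked vision-capable, so prompts stay valid for text-only models:

```python
# Hypothetical helper: keep image attachments only when the configured
# chat model is marked as vision-capable.
def filter_attachments(attachments: list[str], vision_enabled: bool) -> list[str]:
    image_exts = (".png", ".jpg", ".jpeg", ".webp", ".gif")
    if vision_enabled:
        return attachments
    return [a for a in attachments if not a.lower().endswith(image_exts)]

# filter_attachments(["report.txt", "shot.png"], vision_enabled=False) -> ["report.txt"]
```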

models.py

Lines changed: 4 additions & 4 deletions

@@ -20,7 +20,7 @@
     HuggingFaceEndpoint,
 )
 from langchain_google_genai import (
-    GoogleGenerativeAI,
+    ChatGoogleGenerativeAI,
     HarmBlockThreshold,
     HarmCategory,
     embeddings as google_embeddings,
@@ -267,7 +267,7 @@ def get_google_chat(
 ):
     if not api_key:
         api_key = get_api_key("google")
-    return GoogleGenerativeAI(model=model_name, google_api_key=api_key, safety_settings={HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE}, **kwargs)  # type: ignore
+    return ChatGoogleGenerativeAI(model=model_name, google_api_key=api_key, safety_settings={HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE}, **kwargs)  # type: ignore


 def get_google_embedding(
@@ -277,7 +277,7 @@ def get_google_embedding(
 ):
     if not api_key:
         api_key = get_api_key("google")
-    return google_embeddings.GoogleGenerativeAIEmbeddings(model=model_name, api_key=api_key, **kwargs)  # type: ignore
+    return google_embeddings.GoogleGenerativeAIEmbeddings(model=model_name, google_api_key=api_key, **kwargs)  # type: ignore


 # Mistral models
@@ -332,7 +332,7 @@ def get_openrouter_chat(
         dotenv.get_dotenv_value("OPEN_ROUTER_BASE_URL")
         or "https://openrouter.ai/api/v1"
     )
-    return ChatOpenAI(api_key=api_key, model=model_name, base_url=base_url, **kwargs)  # type: ignore
+    return ChatOpenAI(api_key=api_key, model=model_name, base_url=base_url, stream_usage=True, **kwargs)  # type: ignore


 def get_openrouter_embedding(
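Switching from GoogleGenerativeAI (a plain text-completion wrapper) to ChatGoogleGenerativeAI matters for vision: the chat wrapper speaks the message interface, so it can accept multimodal HumanMessage content like the sketch shown earlier. A minimal usage sketch, assuming a valid Google API key; the model name is an example, not dictated by the repo:

```python
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage

# illustrative model name and placeholder key
chat = ChatGoogleGenerativeAI(model="gemini-1.5-flash", google_api_key="YOUR_KEY")

reply = chat.invoke([HumanMessage(content="Say hello in one word.")])
print(reply.content)
```
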
Lines changed: 3 additions & 2 deletions

@@ -1,4 +1,5 @@
 # Instruments
-- following are instruments at disposal:
+- following are instruments at disposal
+- do not overly rely on them they might not be relevant

-{{instruments}}
+{{instruments}}

Lines changed: 2 additions & 1 deletion

@@ -1,4 +1,5 @@
 # Memories on the topic
-- following are your memories about current topic:
+- following are memories about current topic
+- do not overly rely on them they might not be relevant

 {{memories}}

Lines changed: 2 additions & 1 deletion

@@ -1,4 +1,5 @@
 # Solutions from the past
-- following are your memories about successful solutions of related problems:
+- following are memories about successful solutions of related problems
+- do not overly rely on them they might not be relevant

 {{solutions}}
