Commit 3c476da
Author: Kye (committed)

[BUGFIX][Swarms Integration]

1 parent e3b2c9a commit 3c476da

File tree

6 files changed: +149 -24 lines changed

errors.txt

Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
+/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/thop/profile.py:12: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
+  if LooseVersion(torch.__version__) < LooseVersion("1.0.0"):
+/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/thop/profile.py:68: DeprecationWarning: distutils Version classes are deprecated. Use packaging.version instead.
+  if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"):
+distutils Version classes are deprecated. Use packaging.version instead.
+distutils Version classes are deprecated. Use packaging.version instead.
+Traceback (most recent call last):
+  File "/Users/defalt/Desktop/Athena/research/tree-of-thoughts/example.py", line 62, in <module>
+    solution = tree_of_thoughts.solve(
+               ^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Users/defalt/Desktop/Athena/research/tree-of-thoughts/tree_of_thoughts/treeofthoughts.py", line 559, in solve
+    return self.monte_carlo_search(
+           ^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Users/defalt/Desktop/Athena/research/tree-of-thoughts/tree_of_thoughts/treeofthoughts.py", line 604, in monte_carlo_search
+    thoughts = self.model.generate_thoughts(
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Users/defalt/Desktop/Athena/research/tree-of-thoughts/tree_of_thoughts/tot_agent.py", line 106, in generate_thoughts
+    thoughts = self.run(prompt)
+               ^^^^^^^^^^^^^^^^
+  File "/Users/defalt/Desktop/Athena/research/tree-of-thoughts/tree_of_thoughts/tot_agent.py", line 70, in run
+    response = self.agent(task)
+               ^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/swarms/structs/agent.py", line 649, in __call__
+    self.run(task, img, *args, **kwargs)
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/swarms/structs/agent.py", line 570, in run
+    response = self.llm(
+               ^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/llms/base.py", line 876, in __call__
+    self.generate(
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/llms/base.py", line 656, in generate
+    output = self._generate_helper(
+             ^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/llms/base.py", line 544, in _generate_helper
+    raise e
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/langchain/llms/base.py", line 531, in _generate_helper
+    self._generate(
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/swarms/models/openai_models.py", line 1082, in _generate
+    full_response = completion_with_retry(
+                    ^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/swarms/models/openai_models.py", line 212, in completion_with_retry
+    return _completion_with_retry(**kwargs)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 289, in wrapped_f
+    return self(f, *args, **kw)
+           ^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 379, in __call__
+    do = self.iter(retry_state=retry_state)
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 314, in iter
+    return fut.result()
+           ^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 449, in result
+    return self.__get_result()
+           ^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/concurrent/futures/_base.py", line 401, in __get_result
+    raise self._exception
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/tenacity/__init__.py", line 382, in __call__
+    result = fn(*args, **kwargs)
+             ^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/swarms/models/openai_models.py", line 210, in _completion_with_retry
+    return llm.client.create(**kwargs)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/chat_completion.py", line 25, in create
+    return super().create(*args, **kwargs)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_resources/abstract/engine_api_resource.py", line 153, in create
+    response, _, api_key = requestor.request(
+                           ^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 288, in request
+    result = self.request_raw(
+             ^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/openai/api_requestor.py", line 596, in request_raw
+    result = _thread_context.session.request(
+             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/sessions.py", line 589, in request
+    resp = self.send(prep, **send_kwargs)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/sessions.py", line 703, in send
+    r = adapter.send(request, **kwargs)
+        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/requests/adapters.py", line 486, in send
+    resp = conn.urlopen(
+           ^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 703, in urlopen
+    httplib_response = self._make_request(
+                       ^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 449, in _make_request
+    six.raise_from(e, None)
+  File "<string>", line 3, in raise_from
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/urllib3/connectionpool.py", line 444, in _make_request
+    httplib_response = conn.getresponse()
+                       ^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/http/client.py", line 1374, in getresponse
+    response.begin()
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/http/client.py", line 318, in begin
+    version, status, reason = self._read_status()
+                              ^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/http/client.py", line 279, in _read_status
+    line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
+               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/socket.py", line 705, in readinto
+    return self._sock.recv_into(b)
+           ^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1278, in recv_into
+    return self.read(nbytes, buffer)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^
+  File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/ssl.py", line 1134, in read
+    return self._sslobj.read(len, buffer)
+           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+KeyboardInterrupt
+Implicitly cleaning up <TemporaryDirectory '/var/folders/zl/0vgkvtm52vjcs1skpnlqnssw0000gn/T/tmpk9wgadbgwandb'>
+Implicitly cleaning up <TemporaryDirectory '/var/folders/zl/0vgkvtm52vjcs1skpnlqnssw0000gn/T/tmpeqcgk3h9wandb-artifacts'>
+Implicitly cleaning up <TemporaryDirectory '/var/folders/zl/0vgkvtm52vjcs1skpnlqnssw0000gn/T/tmpoadrfcapwandb-media'>
+Implicitly cleaning up <TemporaryDirectory '/var/folders/zl/0vgkvtm52vjcs1skpnlqnssw0000gn/T/tmp431qofrrwandb-media'>
+Implicitly cleaning up <TemporaryDirectory '/var/folders/zl/0vgkvtm52vjcs1skpnlqnssw0000gn/T/tmpichwdx5i'>
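
Note: the traceback records the full call path from example.py through ToTAgent.run, the swarms Agent, langchain, tenacity, and the OpenAI client down to the socket read, and it ends in a manual KeyboardInterrupt rather than a crash. The thop deprecation warnings at the top are independent of tree-of-thoughts; below is a minimal sketch of the comparison the warning itself recommends, assuming packaging and torch are installed (illustrative, not part of this commit):

# Minimal sketch of the packaging.version comparison that the
# DeprecationWarning suggests as a replacement for LooseVersion.
# Assumes `packaging` and `torch` are installed.
from packaging.version import Version

import torch

if Version(torch.__version__) >= Version("1.1.0"):
    print("torch is at least 1.1.0")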

example.py

Lines changed: 6 additions & 2 deletions
@@ -3,6 +3,7 @@
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat

+# Load the environment variables
load_dotenv()

# Get the API key from the environment
@@ -11,9 +12,12 @@
# Initialize an agent from swarms
agent = Agent(
    agent_name="tree_of_thoughts",
-    agent_description="This agent uses the tree_of_thoughts library to generate thoughts.",
+    agent_description=(
+        "This agent uses the tree_of_thoughts library to generate thoughts."
+    ),
    system_prompt=None,
-    llm = OpenAIChat(),
+    llm=OpenAIChat(),
+    max_loops=1,
)

# Initialize the ToTAgent class with the API key
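
Assembled, the corrected setup looks roughly like the sketch below. The ToTAgent and MonteCarloSearch wiring is an assumption pieced together from the huggingface_example.py imports and the tree_of_thoughts.solve call visible in errors.txt; this diff itself only shows the Agent construction.

# Hedged sketch of the corrected example.py setup; the ToTAgent and
# MonteCarloSearch constructor arguments are assumptions.
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
from tree_of_thoughts import ToTAgent, MonteCarloSearch

# Load the environment variables (OPENAI_API_KEY, etc.)
load_dotenv()

# Initialize an agent from swarms, with the fixed llm=OpenAIChat()
# keyword spacing and the new max_loops=1 from this diff
agent = Agent(
    agent_name="tree_of_thoughts",
    agent_description=(
        "This agent uses the tree_of_thoughts library to generate thoughts."
    ),
    system_prompt=None,
    llm=OpenAIChat(),
    max_loops=1,
)

# Wrap the swarms agent and hand it to a search strategy
model = ToTAgent(agent)  # constructor argument is an assumption
tree_of_thoughts = MonteCarloSearch(model)  # assumption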

huggingface_example.py

Lines changed: 6 additions & 4 deletions
@@ -1,7 +1,7 @@
import os
from tree_of_thoughts import ToTAgent, MonteCarloSearch
from dotenv import load_dotenv
-from swarms import Agent, OpenAIChat, HuggingfaceLLM
+from swarms import Agent, HuggingfaceLLM

load_dotenv()

@@ -11,11 +11,13 @@
# Initialize an agent from swarms
agent = Agent(
    agent_name="tree_of_thoughts",
-    agent_description="This agent uses the tree_of_thoughts library to generate thoughts.",
+    agent_description=(
+        "This agent uses the tree_of_thoughts library to generate thoughts."
+    ),
    system_prompt=None,
-    llm = HuggingfaceLLM(
+    llm=HuggingfaceLLM(
        "EleutherAI/gpt-neo-2.7B",
-        ),
+    ),
)

# Initialize the ToTAgent class with the API key
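
The only substantive change relative to example.py is the llm= argument: the now-unused OpenAIChat import is dropped and the agent is pointed at a local Hugging Face checkpoint. A minimal sketch of the swap, assuming HuggingfaceLLM takes the model ID positionally as this diff shows:

# Hedged sketch: swapping backends only touches the llm= argument;
# everything else matches the example.py setup above.
from swarms import Agent, HuggingfaceLLM

agent = Agent(
    agent_name="tree_of_thoughts",
    system_prompt=None,
    llm=HuggingfaceLLM("EleutherAI/gpt-neo-2.7B"),  # local model instead of OpenAI
)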

pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
[tool.poetry]
name = "tree-of-thoughts"
-version = "0.4.2"
+version = "0.4.5"
description = "Tree of Thoughts - Pytorch"
authors = ["Kye Gomez <[email protected]>"]
license = "MIT"

tree_of_thoughts/tot_agent.py

Lines changed: 5 additions & 5 deletions
@@ -12,20 +12,20 @@ class ToTAgent:
    """

    OpenAI Language Model API Wrapper
-
+
    Args:
        agent (Agent): Agent class from swarms
        strategy (str): Strategy to use for generating thoughts
        evaluation_strategy (str): Strategy to use for evaluating states
        enable_react (bool): Enable ReAct prompting
        k (int): Number of thoughts to generate
-
+
    Methods:
        run(task: str) -> list: Generate text from prompt using OpenAI API
        generate_thoughts(state, k, initial_prompt, rejected_solutions=None) -> list: Generate thoughts from state using OpenAI API
        generate_solution(initial_prompt, state, rejected_solutions=None) -> str: Generate solution from state using OpenAI API
        evaluate_states(states, initial_prompt) -> dict: Evaluate states of reasoning using OpenAI API
-
+
    Examples:
        >>> from tree_of_thoughts.tot_agent import ToTAgent
        >>> from swarms import Agent
@@ -103,7 +103,7 @@ def generate_thoughts(
        complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""

        prompt += self.react_prompt
-        thoughts = self.generate_text(prompt)
+        thoughts = self.run(prompt)
        return thoughts

    def generate_solution(self, initial_prompt, state, rejected_solutions=None):
@@ -119,7 +119,7 @@ def generate_solution(self, initial_prompt, state, rejected_solutions=None):
        Devise the best possible solution for the task: {initial_prompt}, Here are evaluated solutions that were rejected:
        ###{rejected_solutions}###,
        complete the {initial_prompt} without making the same mistakes you did with the evaluated rejected solutions. Be simple. Be direct. Provide intuitive solutions as soon as you think of them."""
-        answer = self.generate_text(prompt, 1)
+        answer = self.run(prompt)
        print(f"Answerrrrrr {answer}")
        # print(thoughts)
        # print(f"General Solution : {answer}")

tree_of_thoughts/treeofthoughts.py

Lines changed: 16 additions & 12 deletions
@@ -113,6 +113,8 @@ def adjust_pruning_threshold_moving_average(


class BFS(TreeofThoughts):
+    """Class representing the Breadth-First Search algorithm for Tree of Thoughts."""
+
    def solve(
        self,
        initial_prompt,
@@ -122,6 +124,20 @@ def solve(
        value_threshold,
        pruning_threshold=0.5,
    ):
+        """
+        Solve the Tree of Thoughts problem using the Breadth-First Search algorithm.
+
+        Args:
+            initial_prompt (str): The initial prompt for generating thoughts.
+            num_thoughts (int): The number of thoughts to generate at each state.
+            max_steps (int): The maximum number of steps to take in the search.
+            max_states (int): The maximum number of states to keep track of.
+            value_threshold (float): The threshold value for selecting states.
+            pruning_threshold (float, optional): The threshold for dynamic pruning. Defaults to 0.5.
+
+        Returns:
+            str or None: The generated solution or the highest rated state.
+        """
        current_states = [initial_prompt]
        state_values = {}
        dynamic_pruning_threshold = pruning_threshold
@@ -177,18 +193,6 @@ def solve(
            self.logNewState(state, value)
            logger.debug(f"State Values: {state_values}")

-        # if state_values:
-        #     highest_rated_solution = max(state_values.items(), key=lambda x: x[1])
-        #     print(f"highest rated solution: {highest_rated_solution}")
-        #     highest_rated_state = highest_rated_solution[0]  # Use a different name to avoid confusion
-        #     print(f'highest rated state: {highest_rated_state}')
-        #     try:
-        #         solution = self.model.generate_solution(initial_prompt, highest_rated_state)
-        #     except Exception as e:
-        #         logger.error(f"Error in generating solution: {e}")
-        #         solution = None  # Set a fallback value for solution
-
-        #     return solution if solution is not None else highest_rated_state  # Return highest rated state if solution is None
        if state_values:
            highest_rated_solution = max(
                state_values.items(), key=lambda x: x[1]
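
The new docstring pins down the BFS.solve contract. Below is a hedged usage sketch based only on the documented parameters; the import path follows this diff's file location, while the BFS constructor argument is an assumption:

# Hedged usage sketch for BFS.solve; only the solve() parameters come
# from the docstring above, the rest is assumed.
from tree_of_thoughts.treeofthoughts import BFS  # path per this diff

# `model` would be a ToTAgent as in the example.py sketch earlier;
# passing it to the constructor is an assumption.
tree = BFS(model)
solution = tree.solve(
    initial_prompt="Outline a plan for testing this repository",
    num_thoughts=3,         # thoughts generated at each state
    max_steps=5,            # maximum search depth
    max_states=4,           # states kept in the frontier
    value_threshold=0.6,    # minimum value for a state to be kept
    pruning_threshold=0.5,  # starting threshold for dynamic pruning
)
print(solution)  # the generated solution, or the highest rated state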
