Skip to content
Merged
Show file tree
Hide file tree
Changes from 120 commits
Commits
Show all changes
121 commits
Select commit Hold shift + click to select a range
fe58941
Switches logging to Logger functionality from Database
Neverbolt Jul 26, 2024
f3c0642
Adds global configurables
Neverbolt Aug 6, 2024
11eeb54
Moves all `_log`s to global `log`
Neverbolt Aug 6, 2024
934c177
start 0.4.0 development cycle
andreashappe Aug 29, 2024
b48336d
write down publishing notes
andreashappe Aug 29, 2024
13cb9ac
TEMP PLEASE APPEND
Neverbolt Sep 2, 2024
0e60396
Update pyproject.toml
andreashappe Sep 2, 2024
dc3ecdd
Formatting and Linting
Neverbolt Sep 3, 2024
ef8d85e
Merge branch 'development' of github.com:ipa-lab/hackingBuddyGPT into…
Neverbolt Sep 3, 2024
1516d98
start 0.4.0 development cycle
andreashappe Aug 29, 2024
9d0d9fd
write down publishing notes
andreashappe Aug 29, 2024
80f4f8f
Formatting and Linting
Neverbolt Sep 3, 2024
76b3de3
Update pyproject.toml
andreashappe Sep 2, 2024
05314d7
Merge branch 'development' of github.com:ipa-lab/hackingBuddyGPT into…
andreashappe Sep 4, 2024
b634bf0
Minor changes and fixes
Neverbolt Nov 28, 2024
28c5bf9
Merge branch 'main' of github.com:ipa-lab/hackingBuddyGPT into log_in…
Neverbolt Nov 28, 2024
692e05c
Added Viewer and Remote Logging (currently not usable without code ch…
Neverbolt Nov 28, 2024
bef8e09
re-adds instructor
Neverbolt Dec 3, 2024
663fd82
rolls back sqlite3 autocommit to isolation_level=None for <3.12 compa…
Neverbolt Dec 3, 2024
0133260
feat: add gpt-4o, gpt-4o-mini, o1-preview, o1-mini
andreashappe Dec 3, 2024
c78f688
fix: remove logging comments
lloydchang Oct 29, 2024
2e22fe8
fix: remove extraneous line
lloydchang Oct 29, 2024
7cd5356
feat: add codespace support
lloydchang Oct 28, 2024
44c8e59
fix: chmod +x and install prerequisites
lloydchang Oct 28, 2024
eaee91e
fix: typos
lloydchang Oct 28, 2024
17a0190
fix: use gpt-3.5-turbo
lloydchang Oct 28, 2024
be28798
docs: update README.md to match shell script
lloydchang Oct 28, 2024
8d7427f
fix: no longer start automaticall
lloydchang Oct 28, 2024
4151f4a
docs: fix typo
lloydchang Oct 28, 2024
d119de1
docs: fix typo
lloydchang Oct 28, 2024
5fc19ba
docs: add cost estimates
lloydchang Oct 28, 2024
c25b4aa
doc: clarify instructions
lloydchang Oct 28, 2024
96da632
docs: clarify steps in shell script
lloydchang Oct 28, 2024
2647b10
docs: clarify intermediary steps and failure mode
lloydchang Oct 28, 2024
7542805
docs: clarify key combination and command palette
lloydchang Oct 28, 2024
1547438
docs: fix typo
lloydchang Oct 28, 2024
53e568a
fix: use gpt-4-turbo
lloydchang Oct 29, 2024
1fec09a
fix: change parameters to facilitate debugging
lloydchang Oct 29, 2024
5725778
docs(README.md): fix typo
lloydchang Nov 3, 2024
ccd2e13
fix(.gitignore): ignore temporary codespaces ansible files
lloydchang Nov 3, 2024
13f639c
add: mac 127.0.0.1
lloydchang Nov 13, 2024
3c6635d
fix: use existing GEMINI_API_KEY environment variable
lloydchang Nov 13, 2024
e6cd1ac
docs: explain why 127.0.0.1 49152 is used
lloydchang Nov 13, 2024
9500255
fix: explain localhost as a workaround
lloydchang Nov 13, 2024
552c56c
fix: use gemini-1.5-flash-latest
lloydchang Nov 13, 2024
1c457ee
docs: add clarifications about Mac experiment
lloydchang Nov 13, 2024
c12892a
fix: move Mac related information into MAC.md
lloydchang Nov 13, 2024
f6fe862
docs: add clarifications about Mac experiment
lloydchang Nov 13, 2024
78a4307
docs: content moved to MAC.md
lloydchang Nov 13, 2024
d69d88e
fix: clarify docs and shell script comment
lloydchang Nov 13, 2024
7c569fd
docs: clarify that disclaimers are in README.md
lloydchang Nov 13, 2024
374ef52
docs: add bolding and bullets for clarifications
lloydchang Nov 13, 2024
c6c8370
docs: add Preqrequisite: Install Homebrew and Bash version 5
lloydchang Nov 13, 2024
bc48c4a
fix: minor typo
lloydchang Nov 13, 2024
7bf1a62
docs: fix minor typo
lloydchang Nov 13, 2024
879c6c0
doc(README.md): add Mac use case
lloydchang Nov 14, 2024
0e706ec
docs(README.md): format Mac use case
lloydchang Nov 14, 2024
d697c19
chore: Add Bash version checks and move scripts to a subdirectory
lloydchang Dec 2, 2024
ff9019d
chore: Update devcontainer.json to use scripts prefix
lloydchang Dec 2, 2024
54bfffa
chore: Add .aider* to .gitignore
lloydchang Dec 2, 2024
2506a99
fix: errors introduced by llm earlier
lloydchang Dec 2, 2024
1140caa
docs(MAC.md): amend with scripts directory
lloydchang Dec 2, 2024
5dacbf7
fix: reorganize scripts
lloydchang Dec 2, 2024
11824b9
fix: use /opt/homebrew/bin/bash during bash version check
lloydchang Dec 2, 2024
b993449
fix: bash version checks vary between 3+ or 5+
lloydchang Dec 2, 2024
1fe7bd4
fix: bash version checks
lloydchang Dec 2, 2024
66c86b9
docs(README.md): reorganize scripts
lloydchang Dec 2, 2024
0f9f7be
fix: bash version checks
lloydchang Dec 2, 2024
3f89884
fix: directory to run python commands
lloydchang Dec 2, 2024
42459ac
fix: bash version checks
lloydchang Dec 2, 2024
5226b19
fix: reorganize scripts
lloydchang Dec 2, 2024
4d5b996
fix: reorganize scripts
lloydchang Dec 2, 2024
41d7724
fix: reorganize scripts
lloydchang Dec 2, 2024
580c276
fix: use gpt-4 which maps to gemini-1.5-flash-latest
lloydchang Dec 3, 2024
8d3ac11
fix: example for MAC.md
lloydchang Dec 3, 2024
ac30616
fix: typos and add clarification
lloydchang Dec 3, 2024
2681b2b
fix: add comments that demonstrate using gemini-openai-proxy and Gemini
lloydchang Dec 3, 2024
5faa73d
fix: demonstrate export GEMINI_API_KEY= usage before wintermute
lloydchang Dec 3, 2024
4600c40
Update pyproject.toml
andreashappe Dec 3, 2024
7cebc03
feat: add gpt-4o, gpt-4o-mini, o1-preview, o1-mini
lloydchang Oct 29, 2024
a4f5e8f
fix: remove logging comments
lloydchang Oct 29, 2024
71924f5
Merge branch 'main' into development
andreashappe Dec 3, 2024
6eceab6
Merge branch 'development' of github.com:ipa-lab/hackingBuddyGPT into…
Neverbolt Dec 10, 2024
161c44b
remove removed viewer script
Neverbolt Dec 10, 2024
7b3ebb9
Adds documentation about the beta viewer feature
Neverbolt Dec 10, 2024
ebc5000
Remove unnecessary import
Neverbolt Dec 10, 2024
fa5b3f6
updated "hackingBuddyGPT in the News"
Neverbolt Dec 10, 2024
1b7dd1a
Merge pull request #79 from ipa-lab/log_infrastructure
andreashappe Dec 10, 2024
5db5fa8
Merge branch 'development' into new-branch-1
andreashappe Dec 10, 2024
178abe4
Merge pull request #99 from ipa-lab/new-branch-1
andreashappe Dec 10, 2024
c56b506
Add changes to enable keyfile and user/pass auth.
jamfish Mar 19, 2025
40fb71c
Reflect keyauth change in env example files and README.
jamfish Mar 19, 2025
17e1799
Forgot a comma. Found bug during re-testing.
jamfish Mar 19, 2025
d2421f7
Merge pull request #108 from jamfish/jf-sshauthupdate
andreashappe Mar 20, 2025
9edd714
removed parameter keyfilename from __call__
Qsan1 Apr 8, 2025
1930e84
added changes to common infrastructure for thesis prototype
Qsan1 Apr 9, 2025
d5c393c
added base structure for the new usecase
Qsan1 Apr 9, 2025
254b136
added history compression component
Qsan1 Apr 10, 2025
dd79f4b
added Chain of thought component
Qsan1 Apr 10, 2025
d97ebd8
added structure via prompt component
Qsan1 Apr 10, 2025
39ea28c
fixed typo
Qsan1 Apr 10, 2025
5413c13
added Analyze component
Qsan1 Apr 10, 2025
e8940dc
added .gitignore
Qsan1 Apr 11, 2025
665aa27
added rag dependencies to pyproject.toml
Qsan1 Apr 11, 2025
0d1544b
added rag parameters to .env.example
Qsan1 Apr 11, 2025
3d0ef1c
added RAG component
Qsan1 Apr 11, 2025
be14eed
added README.md
Qsan1 Apr 11, 2025
fc7182a
updated README.md
Qsan1 Apr 11, 2025
680e50e
typo
Qsan1 Apr 14, 2025
0cf164b
Merge pull request #111 from CoralStack/development
andreashappe Apr 14, 2025
7982e1e
improve configurable type handling
Neverbolt Apr 15, 2025
2b057a1
moves rag-usecase dependencies into main dependency (cannot run other…
Neverbolt Apr 15, 2025
8351ff0
formatting pyproject.toml
Neverbolt Apr 21, 2025
952467d
first look at the new argument parsing :)
Neverbolt Apr 21, 2025
e92faaa
The configuration is now complete
Neverbolt Apr 24, 2025
6feaa9b
marks next api_key secret
Neverbolt Apr 24, 2025
98d3204
new documentation about viewer, replayer and config
Neverbolt Apr 24, 2025
b4aed9d
Merge pull request #113 from Neverbolt/logging_documentation
andreashappe Apr 24, 2025
18dbcc4
Merge branch 'main' into development
andreashappe Apr 24, 2025
5cdaad5
bump version to 0.4.0
andreashappe Apr 24, 2025
9f3d4d3
Potential fix for code scanning alert no. 1: Incomplete URL substring…
andreashappe Apr 24, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 16 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,26 @@ conn.port=2222

# exchange with the user for your target VM
conn.username='bob'
#To just use keyauth only, use '' with no space for conn.password
#Otherwise, insert the password for instance here
conn.password='secret'
#To just use username and password auth only, use '' with no space for conn.keyfilename
#Otherwise, insert the filepath for the keyfile here (for example, '/home/bob/.ssh/sshkey.rsa')
conn.keyfilename=''

# which LLM model to use (can be anything openai supports, or if you use a custom llm.api_url, anything your api provides for the model parameter)
llm.model='gpt-3.5-turbo'
llm.context_size=16385

# how many rounds should this thing go?
max_turns = 20
max_turns = 20

# The following four parameters are only relevant for the usecase rag
# rag_database_folder_name: Name of the folder where the vector store will be saved.
# rag_embedding: The name of the embedding model used. Currently only OpenAI api supported.
# openai_api_key: API key that is used for the embedding model.
# rag_return_token_limit: The upper bound for the RAG output.
rag_database_folder_name = "vectorDB"
rag_embedding = "text-embedding-3-small"
openai_api_key = 'your-openai-key'
rag_return_token_limit = 1000
23 changes: 23 additions & 0 deletions .env.example.aws
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
llm.api_key='your-openai-key'
log_db.connection_string='log_db.sqlite3'

# exchange with the IP of your target VM
conn.host='enter the public IP of AWS Instance'
conn.hostname='DNS of AWS Instance'
conn.port=22

# user of target AWS Instance
conn.username='bob'
#To just use keyauth only, use '' with no space for conn.password
#Otherwise, insert the password for instance here
conn.password=''
#To just use username and password auth only, use '' with no space for conn.keyfilename
#Otherwise, insert the filepath for the keyfile here (for example, '/home/bob/.ssh/awskey.pem')
conn.keyfilename='/home/bob/.ssh/awskey.pem'

# which LLM model to use (can be anything openai supports, or if you use a custom llm.api_url, anything your api provides for the model parameter)
llm.model='gpt-3.5-turbo'
llm.context_size=16385

# how many rounds should this thing go?
max_turns = 20
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
.env
venv/
.venv/
__pycache__/
*.swp
*.log
Expand Down
125 changes: 101 additions & 24 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -85,38 +85,38 @@ template_next_cmd = Template(filename=str(template_dir / "next_cmd.txt"))


class MinimalLinuxPrivesc(Agent):

conn: SSHConnection = None

_sliding_history: SlidingCliHistory = None
_max_history_size: int = 0

def init(self):
super().init()

self._sliding_history = SlidingCliHistory(self.llm)
self._max_history_size = self.llm.context_size - llm_util.SAFETY_MARGIN - self.llm.count_tokens(template_next_cmd.source)

self.add_capability(SSHRunCommand(conn=self.conn), default=True)
self.add_capability(SSHTestCredential(conn=self.conn))
self._template_size = self.llm.count_tokens(template_next_cmd.source)

def perform_round(self, turn: int) -> bool:
got_root: bool = False

with self._log.console.status("[bold green]Asking LLM for a new command..."):
# get as much history as fits into the target context size
history = self._sliding_history.get_history(self.llm.context_size - llm_util.SAFETY_MARGIN - self._template_size)
@log_conversation("Asking LLM for a new command...")
def perform_round(self, turn: int, log: Logger) -> bool:
# get as much history as fits into the target context size
history = self._sliding_history.get_history(self._max_history_size)

# get the next command from the LLM
answer = self.llm.get_response(template_next_cmd, capabilities=self.get_capability_block(), history=history, conn=self.conn)
cmd = llm_util.cmd_output_fixer(answer.result)
# get the next command from the LLM
answer = self.llm.get_response(template_next_cmd, capabilities=self.get_capability_block(), history=history, conn=self.conn)
message_id = log.call_response(answer)

with self._log.console.status("[bold green]Executing that command..."):
self._log.console.print(Panel(answer.result, title="[bold cyan]Got command from LLM:"))
result, got_root = self.get_capability(cmd.split(" ", 1)[0])(cmd)
# clean the command, load and execute it
cmd = llm_util.cmd_output_fixer(answer.result)
capability, arguments = cmd.split(" ", 1)
result, got_root = self.run_capability(message_id, "0", capability, arguments, calling_mode=CapabilityCallingMode.Direct, log=log)

# log and output the command and its result
self._log.log_db.add_log_query(self._log.run_id, turn, cmd, result, answer)
# store the results in our local history
self._sliding_history.add_command(cmd, result)
self._log.console.print(Panel(result, title=f"[bold cyan]{cmd}"))

# if we got root, we can stop the loop
# signal if we were successful in our task
return got_root


Expand Down Expand Up @@ -169,21 +169,65 @@ $ source ./venv/bin/activate
# install python requirements
$ pip install -e .

# copy default .env.example
# copy default .env.example
$ cp .env.example .env

# NOTE: if you are trying to use this with AWS or ssh-key only authentication, copy .env.example.aws
$ cp .env.example.aws .env

# IMPORTANT: setup your OpenAI API key, the VM's IP and credentials within .env
$ vi .env

# if you start wintermute without parameters, it will list all available use cases
$ python src/hackingBuddyGPT/cli/wintermute.py
usage: wintermute.py [-h]
{LinuxPrivesc,WindowsPrivesc,ExPrivEscLinux,ExPrivEscLinuxTemplated,ExPrivEscLinuxHintFile,ExPrivEscLinuxLSE,MinimalWebTesting,WebTestingWithExplanation,SimpleWebAPITesting,SimpleWebAPIDocumentation}
...
wintermute.py: error: the following arguments are required: {LinuxPrivesc,WindowsPrivesc,ExPrivEscLinux,ExPrivEscLinuxTemplated,ExPrivEscLinuxHintFile,ExPrivEscLinuxLSE,MinimalWebTesting,WebTestingWithExplanation,SimpleWebAPITesting,SimpleWebAPIDocumentation}
No command provided
usage: src/hackingBuddyGPT/cli/wintermute.py <command> [--help] [--config config.json] [options...]

commands:
ExPrivEscLinux Showcase Minimal Linux Priv-Escalation
ExPrivEscLinuxTemplated Showcase Minimal Linux Priv-Escalation
LinuxPrivesc Linux Privilege Escalation
WindowsPrivesc Windows Privilege Escalation
ExPrivEscLinuxHintFile Linux Privilege Escalation using hints from a hint file initial guidance
ExPrivEscLinuxLSE Linux Privilege Escalation using lse.sh for initial guidance
WebTestingWithExplanation Minimal implementation of a web testing use case while allowing the llm to 'talk'
SimpleWebAPIDocumentation Minimal implementation of a web API testing use case
SimpleWebAPITesting Minimal implementation of a web API testing use case
Viewer Webserver for (live) log viewing
Replayer Tool to replay the .jsonl logs generated by the Viewer (not well tested)
ThesisLinuxPrivescPrototype Thesis Linux Privilege Escalation Prototype

# to get more information about how to configure a use case you can call it with --help
$ python src/hackingBuddyGPT/cli/wintermute.py LinuxPrivesc --help
usage: src/hackingBuddyGPT/cli/wintermute.py LinuxPrivesc [--help] [--config config.json] [options...]

--log.log_server_address='localhost:4444' address:port of the log server to be used (default from builtin)
--log.tag='' Tag for your current run (default from builtin)
--log='local_logger' choice of logging backend (default from builtin)
--log_db.connection_string='wintermute.sqlite3' sqlite3 database connection string for logs (default from builtin)
--max_turns='30' (default from .env file, alternatives: 10 from builtin)
--llm.api_key=<secret> OpenAI API Key (default from .env file)
--llm.model OpenAI model name
--llm.context_size='100000' Maximum context size for the model, only used internally for things like trimming to the context size (default from .env file)
--llm.api_url='https://api.openai.com' URL of the OpenAI API (default from builtin)
--llm.api_path='/v1/chat/completions' Path to the OpenAI API (default from builtin)
--llm.api_timeout=240 Timeout for the API request (default from builtin)
--llm.api_backoff=60 Backoff time in seconds when running into rate-limits (default from builtin)
--llm.api_retries=3 Number of retries when running into rate-limits (default from builtin)
--system='linux' (default from builtin)
--enable_explanation=False (default from builtin)
--enable_update_state=False (default from builtin)
--disable_history=False (default from builtin)
--hint='' (default from builtin)
--conn.host
--conn.hostname
--conn.username
--conn.password
--conn.keyfilename
--conn.port='2222' (default from .env file, alternatives: 22 from builtin)
```

## Provide a Target Machine over SSH
### Provide a Target Machine over SSH

The next important part is having a machine that we can run our agent against. In our case, the target machine will be situated at `192.168.122.151`.

Expand All @@ -193,6 +237,23 @@ We are using vulnerable Linux systems running in Virtual Machines for this. Neve
>
> We are using virtual machines from our [Linux Privilege-Escalation Benchmark](https://github.com/ipa-lab/benchmark-privesc-linux) project. Feel free to use them for your own research!

## Using the web based viewer and replayer

If you want to have a better representation of the agent's output, you can use the web-based viewer. You can start it using `wintermute Viewer`, which will run the server on `http://127.0.0.1:4444` for the default `wintermute.sqlite3` database. You can change these options using the `--log_server_address` and `--log_db.connection_string` parameters.

Navigating to the log server address will show you an overview of all runs and clicking on a run will show you the details of that run. The viewer updates live using a websocket connection, and if you enable `Follow new runs` it will automatically switch to the new run when one is started.

Keep in mind that there is no additional protection for this webserver, other than how it can be reached (per default binding to `127.0.0.1` means it can only be reached from your local machine). If you make it accessible to the internet, everybody will be able to see all of your runs and also be able to inject arbitrary data into the database.

Therefore **DO NOT** make it accessible to the internet if you're not super sure about what you're doing!

There is also the experimental replay functionality, which can replay a run live from a capture file, including timing information. This is great for showcases and presentations, because it looks like everything is happening live and for real, but you know exactly what the results will be.

To use this, the run needs to be captured by a Viewer server by setting `--save_playback_dir` to a directory where the viewer can write the capture files.

With the Viewer server still running, you can then start `wintermute Replayer --replay_file <path_to_capture_file>` to replay the captured run (this will create a new run in the database).
You can configure it to `--pause_on_message` and `--pause_on_tool_calls`, which will interrupt the replay at the respective points until enter is pressed in the shell where you run the Replayer in. You can also configure the `--playback_speed` to control the speed of the replay.

## Use Cases

GitHub Codespaces:
Expand Down Expand Up @@ -222,6 +283,22 @@ $ python src/hackingBuddyGPT/cli/wintermute.py LinuxPrivesc --llm.api_key=sk...C
$ pip install '.[testing]'
```

## Beta Features

### Viewer

The viewer is a simple web-based tool to view the results of hackingBuddyGPT runs. It is currently in beta and can be started with:

```bash
$ hackingBuddyGPT Viewer
```

This will start a webserver on `http://localhost:4444` that can be accessed with a web browser.

To log to this central viewer, you currently need to change the `GlobalLogger` definition in [./src/hackingBuddyGPT/utils/logging.py](src/hackingBuddyGPT/utils/logging.py) to `GlobalRemoteLogger`.

This feature is not fully tested yet and therefore is not recommended to be exposed to the internet!

## Publications about hackingBuddyGPT

Given our background in academia, we have authored papers that lay the groundwork and report on our efforts:
Expand Down
34 changes: 34 additions & 0 deletions publish_notes.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# how to publish to pypi

## start with testing if the project builds and tag the version

```bash
python -m venv venv
source venv/bin/activate
pip install -e .
pytest
git tag v0.3.0
git push origin v0.3.0
```

## build and new package

(according to https://packaging.python.org/en/latest/tutorials/packaging-projects/)

```bash
pip install build twine
python3 -m build
vi ~/.pypirc
twine check dist/*
```

Now, for next time.. test install the package in a new vanilla environment, then..

```bash
twine upload dist/*
```

## repo todos

- rebase development upon main
- bump the pyproject version number to a new `-dev`
69 changes: 47 additions & 22 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,20 @@ build-backend = "setuptools.build_meta"

[project]
name = "hackingBuddyGPT"
# original author was Andreas Happe, for an up-to-date list see
# https://github.com/ipa-lab/hackingBuddyGPT/graphs/contributors
authors = [
{ name = "Andreas Happe", email = "[email protected]" }
{ name = "HackingBuddyGPT maintainers", email = "[email protected]" }
]
maintainers = [
{ name = "Andreas Happe", email = "[email protected]" },
{ name = "Juergen Cito", email = "juergen.cito@tuwiena.c.at" }
{ name = "Juergen Cito", email = "juergen.cito@tuwien.ac.at" }
]
description = "Helping Ethical Hackers use LLMs in 50 lines of code"
readme = "README.md"
keywords = ["hacking", "pen-testing", "LLM", "AI", "agent"]
requires-python = ">=3.10"
version = "0.3.1"
version = "0.4.0"
license = { file = "LICENSE" }
classifiers = [
"Programming Language :: Python :: 3",
Expand All @@ -24,19 +26,30 @@ classifiers = [
"Development Status :: 4 - Beta",
]
dependencies = [
'fabric == 3.2.2',
'Mako == 1.3.2',
'requests == 2.32.3',
'rich == 13.7.1',
'tiktoken == 0.8.0',
'instructor == 1.7.2',
'PyYAML == 6.0.1',
'python-dotenv == 1.0.1',
'pypsexec == 0.3.0',
'pydantic == 2.8.2',
'openai == 1.65.2',
'BeautifulSoup4',
'nltk'
'fabric == 3.2.2',
'Mako == 1.3.2',
'requests == 2.32.3',
'rich == 13.7.1',
'tiktoken == 0.8.0',
'instructor == 1.7.2',
'PyYAML == 6.0.1',
'python-dotenv == 1.0.1',
'pypsexec == 0.3.0',
'pydantic == 2.8.2',
'openai == 1.65.2',
'BeautifulSoup4',
'nltk',
'fastapi == 0.114.0',
'fastapi-utils == 0.7.0',
'jinja2 == 3.1.4',
'uvicorn[standard] == 0.30.6',
'dataclasses_json == 0.6.7',
'websockets == 13.1',
'langchain-community',
'langchain-openai',
'markdown',
'chromadb',
'langchain-chroma',
]

[project.urls]
Expand All @@ -54,15 +67,27 @@ where = ["src"]

[tool.pytest.ini_options]
pythonpath = "src"
addopts = [
"--import-mode=importlib",
]
addopts = ["--import-mode=importlib"]
[project.optional-dependencies]
testing = [
'pytest',
'pytest-mock'
testing = ['pytest', 'pytest-mock']
dev = [
'ruff',
]
rag-usecase = [
'langchain-community',
'langchain-openai',
'markdown',
'chromadb',
'langchain-chroma',
]

[project.scripts]
wintermute = "hackingBuddyGPT.cli.wintermute:main"
hackingBuddyGPT = "hackingBuddyGPT.cli.wintermute:main"

[tool.ruff]
line-length = 120

[tool.ruff.lint]
select = ["E", "F", "B", "I"]
ignore = ["E501", "F401", "F403"]
12 changes: 10 additions & 2 deletions src/hackingBuddyGPT/capabilities/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
from .capability import Capability
from .psexec_test_credential import PSExecTestCredential
from .psexec_run_command import PSExecRunCommand
from .psexec_test_credential import PSExecTestCredential
from .ssh_run_command import SSHRunCommand
from .ssh_test_credential import SSHTestCredential
from .ssh_test_credential import SSHTestCredential

__all__ = [
"Capability",
"PSExecRunCommand",
"PSExecTestCredential",
"SSHRunCommand",
"SSHTestCredential",
]
Loading