Skip to content

Commit

Permalink
Settings and Readme updates.
Browse files Browse the repository at this point in the history
  • Loading branch information
jtang613 committed Sep 12, 2024
1 parent fcac126 commit afda13c
Show file tree
Hide file tree
Showing 4 changed files with 66 additions and 30 deletions.
11 changes: 11 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,15 @@ Future Roadmap:
* Agentic assistant - Use Autogen or similar framework for self-guided binary RE.
* Model fine tuning - Leverage the RLHF dataset to fine tune the model.

## Quickstart

* If necessary, `pip install -r requirements.txt` from the plugin directory.
* Open Settings -> BinAssist.
* Ensure the RLHF and RAG database paths are appropriate for your environment.
* Point the API host to your preferred API provider and set the API key.
* Load a binary.
* Open BinAssist with the 'BA' sidebar icon and start exploring.

## Screenshot
![Screenshot](/res/screenshots.gif)

Expand All @@ -34,6 +43,8 @@ https://github.com/jtang613/BinAssist

An OpenAI compatible API is required. For local LLM support, use Ollama, LMStudio, Open-WebUI, Text-Generation-WebUI, etc.

`pip install -r requirements.txt`

### Windows

Untested but should work. Submit an Issue or Pull Request for support.
Expand Down
3 changes: 1 addition & 2 deletions plugin.json
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
"python3"
],
"description": "A plugin that provides LLM helpers to explain code and assist in RE.",
"longdescription": "This is a LLM plugin aimed at enabling the use of local LLM's (ollama, text-generation-webui, lm-studio, etc) for assisting with binary exploration and reverse engineering. It supports any OpenAI v1-compatible API. Recommended models are LLaMA-based models such as llama3.1:8b, but others should work as well.\n\nCurrent features include:\n* Explain the current function - Works at all IL levels.\n* Explain the current instruction - Works at all IL levels.\n* General query - Query the LLM directly from the UI.\n* Propose actions - Provide a list of proposed actions to apply.\n* Function calling - Allow agent to call functions to navigate the binary, rename functions and variables.\n* RLHF dataset generation - To enable model fine tuning.\n* RAG augmentation - Supports adding contextual documents to refine query effectiveness.\n* Settings to modify API host, key, model name and max tokens.\n\nFuture Roadmap:\n* Agentic assistant - Use Autogen or similar framework for self-guided binary RE.\n* Model fine tuning - Leverage the RLHF dataset to fine tune the model.\n\n## Screenshot\n![Screenshot](/res/screenshots.gif)\n\n## Homepage\nhttps://github.com/jtang613/BinAssist",
"license": {
"name": "MIT",
"text": "Copyright (c) 2024 Jason Tang\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
Expand All @@ -36,5 +35,5 @@
},
"version": "0.1.1",
"author": "Jason Tang",
"minimumbinaryninjaversion": 3164
"minimumbinaryninjaversion": 4000
}
2 changes: 1 addition & 1 deletion src/plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def createActionsTab(self) -> QtWidgets.QWidget:
self.filter_checkboxes = {}
for fn_dict in ToolCalling.FN_TEMPLATES:
if fn_dict["type"] == "function":
fn_name = f"{fn_dict["function"]["name"].replace('_',' ')}: {fn_dict["function"]["description"]}"
fn_name = f"{fn_dict['function']['name'].replace('_',' ')}: {fn_dict['function']['description']}"
checkbox = QtWidgets.QCheckBox(fn_name)
checkbox.setChecked(True) # Set all checkboxes to checked by default
self.filter_checkboxes[fn_name] = checkbox
Expand Down
80 changes: 53 additions & 27 deletions src/settings.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,33 +27,59 @@ def _register_settings(self) -> None:
"""
self.register_group('binassist', 'BinAssist')

settings_definitions = [
('binassist.remote_host', 'Remote API Host', 'The API host endpoint used to make requests.', 'string', None),
('binassist.api_key', 'API Key', 'The API key used to make requests.', 'string', None),
('binassist.model', 'LLM Model', 'The LLM model used to generate the response.', 'string', 'gpt-4-turbo'),
('binassist.rlhf_db', 'RLHF Database Path', 'The to store the RLHF database.', 'string', 'rlhf_feedback.db'),
('binassist.max_tokens', 'Max Completion Tokens', 'The maximum number of tokens used for completion.', 'number', 8192, 1, 128*1024),
('binassist.rag_db_path', 'RAG Database Path', 'Path to store the RAG vector database.', 'string', 'binassist_rag_db'),
('binassist.use_rag', 'Use RAG', 'Enable Retrieval Augmented Generation for queries.', 'boolean', False),
]

for setting in settings_definitions:
if len(setting) == 5:
key, title, description, setting_type, default = setting
min_value, max_value = None, None
elif len(setting) == 7:
key, title, description, setting_type, default, min_value, max_value = setting

properties = {
'title': title,
'type': setting_type,
'description': description
settings_definitions = {
'binassist.remote_host': {
'title': 'Remote API Host',
'description': 'The API host endpoint used to make requests.',
'type': 'string',
'default': 'https://api.openai.com/v1'
},
'binassist.api_key': {
'title': 'API Key',
'description': 'The API key used to make requests.',
'type': 'string',
'default': None,
'ignore': ["SettingsProjectScope", "SettingsResourceScope"],
'hidden': True
},
'binassist.model': {
'title': 'LLM Model',
'description': 'The LLM model used to generate the response.',
'type': 'string',
'default': 'gpt-4o-mini'
},
'binassist.rlhf_db': {
'title': 'RLHF Database Path',
'description': 'The path to store the RLHF database.',
'type': 'string',
'default': 'rlhf_feedback.db',
'uiSelectionAction': 'file'
},
'binassist.max_tokens': {
'title': 'Max Completion Tokens',
'description': 'The maximum number of tokens used for completion.',
'type': 'number',
'default': 8192,
'minValue': 1,
'maxValue': 128*1024
},
'binassist.rag_db_path': {
'title': 'RAG Database Path',
'description': 'Path to store the RAG vector database.',
'type': 'string',
'default': 'binassist_rag_db',
'uiSelectionAction': 'directory'
},
'binassist.use_rag': {
'title': 'Use RAG',
'description': 'Enable Retrieval Augmented Generation for queries.',
'type': 'boolean',
'default': False
}
if default is not None:
properties['default'] = default
if min_value is not None and max_value is not None:
properties['minValue'] = min_value
properties['maxValue'] = max_value
properties['message'] = f"Min: {min_value}, Max: {max_value}"
}

for key, properties in settings_definitions.items():
if 'minValue' in properties and 'maxValue' in properties:
properties['message'] = f"Min: {properties['minValue']}, Max: {properties['maxValue']}"
self.register_setting(key, json.dumps(properties))

0 comments on commit afda13c

Please sign in to comment.