Basic working version of functions support
erik-megarad committed Jun 14, 2023
1 parent 39191a9 commit 78c3fdb
Showing 27 changed files with 345 additions and 345 deletions.
96 changes: 66 additions & 30 deletions autogpt/agent/agent.py
@@ -7,7 +7,6 @@

from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
from autogpt.llm.base import ChatSequence
from autogpt.llm.chat import chat_with_ai, create_chat_completion
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
@@ -23,6 +22,7 @@
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.memory.message_history import MessageHistory
from autogpt.memory.vector import VectorMemory
from autogpt.models.command_function import CommandFunction
from autogpt.models.command_registry import CommandRegistry
from autogpt.speech import say_text
from autogpt.spinner import Spinner
@@ -136,46 +136,49 @@ def signal_handler(signum, frame):
# Send message to AI, get response
with Spinner("Thinking... ", plain_output=self.config.plain_output):
assistant_reply = chat_with_ai(
self.config,
self,
self.system_prompt,
self.triggering_prompt,
self.fast_token_limit,
self.config.fast_llm_model,
config=self.config,
agent=self,
system_prompt=self.system_prompt,
user_input=self.triggering_prompt,
token_limit=self.fast_token_limit,
model=self.config.fast_llm_model,
functions=self.get_functions_from_commands(),
)

try:
assistant_reply_json = extract_json_from_response(assistant_reply)
validate_json(assistant_reply_json)
except json.JSONDecodeError as e:
logger.error(f"Exception while validating assistant reply JSON: {e}")
assistant_reply_json = {}
reply_content = assistant_reply.content

for plugin in self.config.plugins:
if not plugin.can_handle_post_planning():
continue
assistant_reply_json = plugin.post_planning(assistant_reply_json)

# Print Assistant thoughts
if assistant_reply_json != {}:
# Get command name and arguments
try:
print_assistant_thoughts(
self.ai_name, assistant_reply_json, self.config.speak_mode
)
command_name, arguments = get_command(assistant_reply_json)
if self.config.speak_mode:
say_text(f"I want to execute {command_name}")
reply_content = plugin.post_planning(reply_content)

arguments = self._resolve_pathlike_command_args(arguments)
try:
reply_content_json = json.loads(reply_content)
# TODO: Validate
except json.JSONDecodeError as e:
logger.error(f"Could not decode response JSON")
logger.debug(f"Invalid response JSON: {reply_content_json}")
import pdb

pdb.set_trace()
# TODO: Not this
reply_content_json = {}

print_assistant_thoughts(
self.ai_name, reply_content_json, self.config.speak_mode
)

command_name = assistant_reply.function_call["name"]
arguments = json.loads(assistant_reply.function_call.get("arguments", "{}"))

if self.config.speak_mode:
say_text(f"I want to execute {command_name}")

except Exception as e:
logger.error("Error: \n", str(e))
self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
assistant_reply_json,
reply_content,
NEXT_ACTION_FILE_NAME,
)

@@ -368,7 +371,7 @@ def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
)

feedback = create_chat_completion(prompt)
feedback = create_chat_completion(prompt).content

self.log_cycle_handler.log_cycle(
self.ai_config.ai_name,
@@ -378,3 +381,36 @@ def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
SUPERVISOR_FEEDBACK_FILE_NAME,
)
return feedback

def get_functions_from_commands(self) -> list[CommandFunction]:
"""Get functions from the commands. "functions" in this context refers to OpenAI functions
see https://platform.openai.com/docs/guides/gpt/function-calling
"""
functions = []
for command in self.command_registry.commands.values():
properties = {}
required = []

for argument in command.arguments:
properties[argument.name] = {
"type": argument.type,
"description": argument.description,
}
if argument.required:
required.append(argument.name)

parameters = {
"type": "object",
"properties": properties,
"required": required,
}
functions.append(
CommandFunction(
name=command.name,
description=command.description,
parameters=parameters,
)
)

return functions
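
For reference, a CommandFunction built this way from the execute_python_code command (defined later in this commit) would carry roughly the following OpenAI function spec. This is an illustrative sketch, not part of the diff, and assumes CommandFunction passes its fields through to the API payload unchanged:

# Sketch of the serialized function spec (illustration, not part of the commit)
{
    "name": "execute_python_code",
    "description": "Create a Python file and execute it",
    "parameters": {
        "type": "object",
        "properties": {
            "code": {"type": "string", "description": "The Python code to run"},
            "basename": {
                "type": "string",
                "description": "A name to be given to the python file",
            },
        },
        "required": ["code", "basename"],
    },
}
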
4 changes: 2 additions & 2 deletions autogpt/agent/agent_manager.py
@@ -41,7 +41,7 @@ def create_agent(
if plugin_messages := plugin.pre_instruction(messages.raw()):
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
# Start GPT instance
agent_reply = create_chat_completion(prompt=messages)
agent_reply = create_chat_completion(prompt=messages).content

messages.add("assistant", agent_reply)

@@ -92,7 +92,7 @@ def message_agent(self, key: str | int, message: str) -> str:
messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])

# Start GPT instance
agent_reply = create_chat_completion(prompt=messages)
agent_reply = create_chat_completion(prompt=messages).content

messages.add("assistant", agent_reply)

15 changes: 13 additions & 2 deletions autogpt/command_decorator.py
@@ -4,6 +4,7 @@
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.models.command import Command
from autogpt.models.command_argument import CommandArgument

# Unique identifier for auto-gpt commands
AUTO_GPT_COMMAND_IDENTIFIER = "auto_gpt_command"
@@ -12,11 +13,12 @@
def command(
name: str,
description: str,
signature: str,
arguments: dict = None,
enabled: bool | Callable[[Config], bool] = True,
disabled_reason: Optional[str] = None,
) -> Callable[..., Any]:
"""The command decorator is used to create Command objects from ordinary functions."""
arguments = arguments or {}

# TODO: Remove this in favor of better command management
CFG = Config()
@@ -29,11 +31,20 @@ def command(
return lambda func: func

def decorator(func: Callable[..., Any]) -> Command:
typed_arguments = [
CommandArgument(
name=arg_name,
description=argument.get("description"),
type=argument.get("type", "string"),
required=argument.get("required", False),
)
for arg_name, argument in arguments.items()
]
cmd = Command(
name=name,
description=description,
method=func,
signature=signature,
arguments=typed_arguments,
enabled=enabled,
disabled_reason=disabled_reason,
)
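
The CommandArgument model that the decorator now builds is added elsewhere in this commit (autogpt/models/command_argument.py, one of the files not expanded in this view). A minimal sketch consistent with how it is constructed here, assuming a plain dataclass, would be:

from dataclasses import dataclass


# Hypothetical minimal shape; the actual model in the commit may differ.
@dataclass
class CommandArgument:
    name: str
    description: str
    type: str = "string"
    required: bool = False
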
37 changes: 32 additions & 5 deletions autogpt/commands/execute_code.py
@@ -20,7 +20,18 @@
@command(
"execute_python_code",
"Create a Python file and execute it",
'"code": "<code>", "basename": "<basename>"',
arguments={
"code": {
"type": "string",
"description": "The Python code to run",
"required": True,
},
"basename": {
"required": True,
"type": "string",
"description": "A name to be given to the python file",
},
},
)
def execute_python_code(code: str, basename: str, agent: Agent) -> str:
"""Create and execute a Python file in a Docker container and return the STDOUT of the
@@ -51,7 +62,7 @@ def execute_python_code(code: str, basename: str, agent: Agent) -> str:
return f"Error: {str(e)}"


@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
@command(
"execute_python_file",
"Execute an existing Python file",
arguments={
"filename": {
"type": "string",
"description": "The name of te file to execute",
"required": True,
},
},
)
def execute_python_file(filename: str, agent: Agent) -> str:
"""Execute a Python file in a Docker container and return the output
@@ -171,11 +192,17 @@ def validate_command(command: str, config: Config) -> bool:
@command(
"execute_shell",
"Execute Shell Command, non-interactive commands only",
'"command_line": "<command_line>"',
lambda cfg: cfg.execute_local_commands,
"You are not allowed to run local shell commands. To execute"
enabled=lambda cfg: cfg.execute_local_commands,
disabled_reason="You are not allowed to run local shell commands. To execute"
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
"in your config file: .env - do not attempt to bypass the restriction.",
arguments={
"command_line": {
"type": "string",
"description": "The command line to execute",
"required": True,
}
},
)
def execute_shell(command_line: str, agent: Agent) -> str:
"""Execute a shell command and return the output
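
With these argument schemas advertised to the model, the assistant selects a command via function_call rather than embedding it in the message content. Purely as an illustration (a hypothetical payload following the OpenAI function-calling format, where "arguments" is a JSON-encoded string), agent.py would parse a reply choosing execute_shell like this:

import json

# Hypothetical reply payload, for illustration only
function_call = {"name": "execute_shell", "arguments": '{"command_line": "ls -la"}'}
command_name = function_call["name"]  # -> "execute_shell"
arguments = json.loads(function_call.get("arguments", "{}"))  # -> {"command_line": "ls -la"}
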
41 changes: 32 additions & 9 deletions autogpt/commands/file_operations.py
@@ -141,7 +141,13 @@ def append_to_file(
return f"Error: {err}"


@command("read_file", "Read a file", '"filename": "<filename>"')
@command(
"read_file",
"Read an existing file",
arguments={
"filename": {"type": "string", "description": "The path of the file to read"}
},
)
def read_file(filename: str, agent: Agent) -> str:
"""Read a file and return the contents
@@ -190,7 +196,17 @@ def ingest_file(
logger.warn(f"Error while ingesting file '{filename}': {err}")


@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
@command(
"write_to_file",
"Write to file",
arguments={
"filename": {
"type": "string",
"description": "The name of the file to write to",
},
"text": {"type": "string", "description": "The text to write to the file"},
},
)
def write_to_file(filename: str, text: str, agent: Agent) -> str:
"""Write text to a file
@@ -215,7 +231,13 @@ def write_to_file(filename: str, text: str, agent: Agent) -> str:
return f"Error: {err}"


@command("delete_file", "Delete file", '"filename": "<filename>"')
@command(
"delete_file",
"Delete file",
arguments={
"filename": {"type": "string", "description": "The path of the file to delete"}
},
)
def delete_file(filename: str, agent: Agent) -> str:
"""Delete a file
@@ -235,19 +257,20 @@ def delete_file(filename: str, agent: Agent) -> str:
return f"Error: {err}"


@command("list_files", "List Files in Directory", '"directory": "<directory>"')
def list_files(directory: str, agent: Agent) -> list[str]:
@command(
"list_files",
"List files in the workspace",
arguments={},
)
def list_files(agent: Agent) -> list[str]:
"""lists files in a directory recursively
Args:
directory (str): The directory to search in
Returns:
list[str]: A list of files found in the directory
"""
found_files = []

for root, _, files in os.walk(directory):
for root, _, files in os.walk(agent.workspace.root):
for file in files:
if file.startswith("."):
continue
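
Because list_files now declares arguments={}, the function spec that get_functions_from_commands emits for it has empty properties and required lists, so the model invokes it with no arguments. Roughly, assuming the same pass-through serialization sketched earlier:

# Sketch only; actual serialization is done by CommandFunction
{
    "name": "list_files",
    "description": "List files in the workspace",
    "parameters": {"type": "object", "properties": {}, "required": []},
}
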
(The diff for the remaining changed files is not expanded in this view.)
