Skip to content

Commit

Permalink
Refactor message history and cycling
Browse files Browse the repository at this point in the history
  • Loading branch information
erik-megarad committed Jun 14, 2023
1 parent 5c6a959 commit ff898f3
Show file tree
Hide file tree
Showing 8 changed files with 218 additions and 98 deletions.
76 changes: 47 additions & 29 deletions autogpt/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

from autogpt.config import Config
from autogpt.config.ai_config import AIConfig
from autogpt.llm.base import ChatSequence
from autogpt.llm.base import ChatSequence, MessageCycle
from autogpt.llm.chat import chat_with_ai, create_chat_completion
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
from autogpt.llm.utils import count_string_tokens
Expand Down Expand Up @@ -119,7 +119,7 @@ def signal_handler(signum, frame):
self.ai_config.ai_name,
self.created_at,
self.cycle_count,
[m.raw() for m in self.history],
[m.raw() for m in self.history.messages],
FULL_MESSAGE_HISTORY_FILE_NAME,
)
if (
Expand All @@ -133,6 +133,7 @@ def signal_handler(signum, frame):
f"{self.config.continuous_limit}",
)
break

# Send message to AI, get response
with Spinner("Thinking... ", plain_output=self.config.plain_output):
assistant_reply = chat_with_ai(
Expand All @@ -146,30 +147,41 @@ def signal_handler(signum, frame):
)

reply_content = assistant_reply.content
reply_content_json = {}
if reply_content:
# Sometimes the content can be duplicated, and this will contain 2 objects in one string
# TODO: Why is this?
reply_content = reply_content.split("}\n{")[0]

for plugin in self.config.plugins:
if not plugin.can_handle_post_planning():
continue
reply_content = plugin.post_planning(reply_content)

try:
reply_content_json = json.loads(reply_content)
# TODO: Validate
except json.JSONDecodeError as e:
logger.error(f"Could not decode response JSON")
logger.debug(f"Invalid response JSON: {reply_content_json}")
import pdb

pdb.set_trace()
# TODO: Not this
reply_content_json = {}

print_assistant_thoughts(
self.ai_name, reply_content_json, self.config.speak_mode
)
for plugin in self.config.plugins:
if not plugin.can_handle_post_planning():
continue
reply_content = plugin.post_planning(reply_content)

command_name = assistant_reply.function_call["name"]
arguments = json.loads(assistant_reply.function_call.get("arguments", "{}"))
try:
reply_content_json = json.loads(reply_content)
print_assistant_thoughts(
self.ai_name, reply_content_json, self.config.speak_mode
)
# TODO: Validate
except json.JSONDecodeError as e:
logger.error(f"Could not decode response JSON")
logger.debug(f"Invalid response JSON: {reply_content_json}")
reply_content = ""
else:
logger.warn("AI Response did not include content")

function_call = assistant_reply.function_call
if function_call:
# TODO: What should happen when there's no function call? The AI does this sometimes. Maybe when it
# thinks it's done
command_name = function_call.get("name")
if function_call.get("arguments"):
arguments = json.loads(
assistant_reply.function_call.get("arguments")
)
else:
arguments = {}

if self.config.speak_mode:
say_text(f"I want to execute {command_name}")
Expand Down Expand Up @@ -320,15 +332,21 @@ def signal_handler(signum, frame):

# Check if there's a result from the command append it to the message
# history
if result is not None:
self.history.add("system", result, "action_result")
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
self.history.add("system", "Unable to execute command", "action_result")
if not result:
result = "Unable to execute command"
logger.typewriter_log(
"SYSTEM: ", Fore.YELLOW, "Unable to execute command"
)

logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)

message_cycle = MessageCycle.construct(
user_input=self.triggering_prompt,
ai_response=assistant_reply.content,
result=command_result,
)
self.history.add(message_cycle)

def _resolve_pathlike_command_args(self, command_args):
if "directory" in command_args and command_args["directory"] in {"", "/"}:
command_args["directory"] = str(self.workspace.root)
Expand Down
12 changes: 6 additions & 6 deletions autogpt/commands/execute_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,20 +26,20 @@
"description": "The Python code to run",
"required": True,
},
"basename": {
"name": {
"required": True,
"type": "string",
"description": "A name to be given to the python file",
},
},
)
def execute_python_code(code: str, basename: str, agent: Agent) -> str:
def execute_python_code(code: str, name: str, agent: Agent) -> str:
"""Create and execute a Python file in a Docker container and return the STDOUT of the
executed code. If there is any data that needs to be captured use a print statement
Args:
code (str): The Python code to run
basename (str): A name to be given to the Python file
name (str): A name to be given to the Python file
Returns:
str: The STDOUT captured from the code when it ran
Expand All @@ -48,10 +48,10 @@ def execute_python_code(code: str, basename: str, agent: Agent) -> str:
directory = os.path.join(agent.config.workspace_path, ai_name, "executed_code")
os.makedirs(directory, exist_ok=True)

if not basename.endswith(".py"):
basename = basename + ".py"
if not name.endswith(".py"):
name = name + ".py"

path = os.path.join(directory, basename)
path = os.path.join(directory, name)

try:
with open(path, "w+", encoding="utf-8") as f:
Expand Down
131 changes: 121 additions & 10 deletions autogpt/llm/base.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,142 @@
from __future__ import annotations

from dataclasses import dataclass, field
from enum import Enum
from math import ceil, floor
from typing import List, Literal, TypedDict
from typing import List, Literal, TypedDict, Optional, Union, Any


class MessageType(Enum):
    """Tag classifying where a chat message came from within an agent cycle."""

    AI_RESPONSE = "ai_response"  # free-text reply from the model
    AI_FUNCTION_CALL = "ai_function_call"  # the model requested a function call
    ACTION_RESULT = "action_result"  # result of executing a command


class MessageRole(Enum):
    """Value of the "role" field of an OpenAI chat message (see Message.raw())."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    FUNCTION = "function"

MessageRole = Literal["system", "user", "assistant"]
MessageType = Literal["ai_response", "action_result"]

TText = list[int]
"""Token array representing tokenized text"""


class MessageDict(TypedDict):
    """Wire shape of a chat message dict as produced by ``Message.raw()``.

    NOTE(review): ``Optional[...]`` here means "value may be None", but
    ``Message.raw()`` actually *omits* absent keys rather than setting them
    to None; ``total=False`` / ``NotRequired`` would model that more
    precisely — confirm against all consumers before tightening.
    """

    role: str
    content: Optional[str]
    name: Optional[str]  # only present for function-role messages
    function_call: Optional[dict]  # {"name": ..., "arguments": ...}


class MessageCycle:
    """One complete agent interaction: user prompt, AI reply, and command result.

    Bundles the messages that make up a single think/act cycle so the
    history can store and replay them as a unit.
    """

    user_input: Message
    ai_response: Message
    ai_function_response: Message
    result: Message

    def __init__(
        self,
        user_input: Message,
        ai_response: Message,
        result: Message,
        ai_function_response: Optional[Message] = None,
    ):
        self.user_input = user_input
        self.ai_response = ai_response
        self.ai_function_response = ai_function_response
        self.result = result

    @property
    def messages(self) -> list[Message]:
        """All messages of this cycle, in prompt / response / result order."""
        messages = [
            self.user_input,
            self.ai_response,
            self.result,
        ]
        if self.ai_function_response:
            messages.append(self.ai_function_response)

        return messages

    @classmethod
    def construct(
        cls,
        user_input: str,
        result: Any,
        ai_function_call: Optional[dict] = None,
        ai_function_response: Optional[str] = None,
        ai_response: Optional[str] = None,
    ) -> "MessageCycle":
        """Build a MessageCycle from raw strings instead of Message objects.

        Args:
            user_input: the prompt that triggered this cycle
            result: outcome of the executed command (becomes the system message)
            ai_function_call: optional ``{"name": ..., "arguments": ...}`` dict
            ai_function_response: currently unused (see TODO below)
            ai_response: the assistant's text reply

        Returns:
            MessageCycle: the assembled cycle
        """
        # BUG FIX: the assistant message was previously built from ``result``,
        # duplicating the command output and silently dropping the
        # ``ai_response`` argument (it was never used at all).
        # Passing the function-call fields through the constructor (rather
        # than assigning them afterwards) lets Message's "content is optional
        # for assistant function calls" escape hatch apply.
        function_name = ai_function_call.get("name") if ai_function_call else None
        function_arguments = (
            ai_function_call.get("arguments") if ai_function_call else None
        )
        ai_response_message = Message(
            role=MessageRole.ASSISTANT,
            content=ai_response,
            function_name=function_name,
            function_arguments=function_arguments,
        )

        return cls(
            user_input=Message(role=MessageRole.USER, content=user_input),
            ai_response=ai_response_message,
            result=Message(role=MessageRole.SYSTEM, content=result),
            # TODO: How do messages with the function role look? The docs are sparse
            # ai_function_response=Message(
            #     role=MessageRole.FUNCTION, content=ai_function_response
            # ),
        )


class Message:
    """OpenAI chat message: a role plus text content and/or a function call."""

    def __init__(
        self,
        role: Union[MessageRole, str],
        content: Optional[str] = None,
        function_name: Optional[str] = None,
        function_arguments: Optional[str] = None,
        message_type: Optional[MessageType] = None,
    ):
        # Accept either the enum or its string value; always store the string.
        if isinstance(role, MessageRole):
            self.role: str = role.value
        else:
            self.role = role

        self.function_name = function_name
        # This is a JSON-encoded string
        self.function_arguments = function_arguments
        self.message_type = message_type

        # Docs say:
        # content string Optional
        # The contents of the message. content is required for all messages except assistant messages
        # with function calls.
        # In practice an assistant message with a function call still errors
        # without a content key, so we substitute an empty string.
        if content:
            self.content: Optional[str] = content
        elif function_name and self.role == MessageRole.ASSISTANT.value:
            # BUG FIX: this "" used to be clobbered immediately afterwards by
            # an unconditional ``self.content = content`` (resetting it to None).
            self.content = ""
        else:
            raise ValueError(
                "Content is required for all messages except assistant messages with function calls."
            )

    def raw(self) -> MessageDict:
        """Serialize to the dict shape expected by the OpenAI chat API."""
        raw_message = {"role": self.role}

        # BUG FIX: include content whenever it is set, even when it is "" —
        # an assistant function-call message must still carry a content key
        # (see the constructor comment); the old ``if self.content:`` test
        # dropped the key for empty strings.
        if self.content is not None:
            raw_message["content"] = self.content

        if self.role == MessageRole.FUNCTION.value:
            raw_message["name"] = self.function_name

        if self.function_name:
            # NOTE(review): ``arguments`` is documented above as a
            # JSON-encoded string, but the fallback here is a dict — confirm
            # consumers tolerate that before tightening it to ``"{}"``.
            raw_message["function_call"] = {
                "name": self.function_name,
                "arguments": self.function_arguments or {},
            }

        return raw_message


@dataclass
Expand Down
14 changes: 9 additions & 5 deletions autogpt/llm/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@

from autogpt.config import Config
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.base import ChatSequence, Message, MessageCycle
from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
from autogpt.logs import logger
Expand Down Expand Up @@ -101,7 +101,7 @@ def chat_with_ai(

# Add Messages until the token limit is reached or there are no more messages to add.
for cycle in reversed(list(agent.history.per_cycle())):
messages_to_add = [msg for msg in cycle if msg is not None]
messages_to_add = cycle.messages
tokens_to_add = count_message_tokens(messages_to_add, model)
if current_tokens_used + tokens_to_add > send_token_limit:
break
Expand Down Expand Up @@ -199,8 +199,12 @@ def chat_with_ai(
)

# Update full message history
agent.history.append(user_input_msg)
# TODO: Does this work if we just put function calls in history?
agent.history.add("assistant", assistant_reply.content, "ai_response")
message_cycle = MessageCycle.construct(
user_input=user_input_msg.content,
ai_response=assistant_reply.content,
result=assistant_reply.content,
ai_function_call=assistant_reply.function_call,
)
agent.history.add(message_cycle)

return assistant_reply
7 changes: 2 additions & 5 deletions autogpt/llm/providers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,17 +168,14 @@ def create_chat_completion(
"""
try:
completion: OpenAIObject = openai.ChatCompletion.create(
messages=messages,
**kwargs,
messages=messages, **kwargs
)
if not hasattr(completion, "error"):
logger.debug(f"Response: {completion}")
return completion
except Exception as e:
import pdb

pdb.set_trace()
logger.error(f"Error response from OpenAI: {e}")
raise e


@meter_api
Expand Down
5 changes: 5 additions & 0 deletions autogpt/llm/utils/token_counter.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Functions for counting the number of tokens in a message or string."""
from __future__ import annotations
import json

from typing import List

Expand Down Expand Up @@ -54,6 +55,10 @@ def count_message_tokens(
for message in messages:
num_tokens += tokens_per_message
for key, value in message.raw().items():
# FIXME? Use json dumped version of function call dict for guesstimation
if type(value) is not str:
value = json.dumps(value)

num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
Expand Down
Loading

0 comments on commit ff898f3

Please sign in to comment.