providers\fireworks_provider.py

import json

import requests

from providers.base_provider import BaseLLMProvider
from utils.auth_utils import get_api_key


class FireworksProvider(BaseLLMProvider):
    def __init__(self, api_url):
        self.api_key = get_api_key()
        self.api_url = api_url

    def process_response(self, response):
        if response.status_code == 200:
            return response.json()
        else:
            raise Exception(f"Request failed with status code {response.status_code}")

    def send_request(self, data):
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        # Ensure data is a JSON string
        if isinstance(data, dict):
            json_data = json.dumps(data)
        else:
            json_data = data
        response = requests.post(self.api_url, data=json_data, headers=headers)
        return response
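A minimal usage sketch for the provider above. It assumes the repository's module layout (providers.fireworks_provider), that LLM_PROVIDER is set to "fireworks" in configs/config_local.py so get_api_key() resolves FIREWORKS_API_KEY from the environment, and the endpoint URL and model id shown are illustrative, not mandated by the code.

# Hypothetical usage sketch -- assumes FIREWORKS_API_KEY is set in the environment
# and that this runs from the project root so the provider modules import cleanly.
from providers.fireworks_provider import FireworksProvider

provider = FireworksProvider(
    api_url="https://api.fireworks.ai/inference/v1/chat/completions"  # assumed endpoint
)

request_data = {
    "model": "accounts/fireworks/models/llama-v3-8b-instruct",  # illustrative model id
    "messages": [{"role": "user", "content": "Say hello."}],
}

response = provider.send_request(request_data)   # returns the raw requests.Response
result = provider.process_response(response)     # raises on non-200 status codes
print(result["choices"][0]["message"]["content"])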
providers\groq_provider.py
# groq_provider.py
import json
import os

import requests
import streamlit as st

from configs.config_local import DEBUG
from providers.base_provider import BaseLLMProvider


class Groq_Provider(BaseLLMProvider):
    def __init__(self, api_url, api_key):
        self.api_key = api_key
        if api_url:
            self.api_url = api_url
        else:
            self.api_url = "https://api.groq.com/openai/v1/chat/completions"

    def get_available_models(self):
        if DEBUG:
            print("GROQ: get_available_models")
            # print(f"KEY: {self.api_key}")
        response = requests.get(
            "https://api.groq.com/openai/v1/models",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            },
        )
        if response.status_code == 200:
            models = response.json().get("data", [])
            return [model["id"] for model in models]
        else:
            raise Exception(f"Failed to retrieve models: {response.status_code}")

    def process_response(self, response):
        if response.status_code == 200:
            return response.json()
        else:
            raise Exception(f"Request failed with status code {response.status_code}")

    def send_request(self, data):
        # Check for API key in environment variable
        api_key = os.environ.get("GROQ_API_KEY")
        # If not found in environment variable, check session state
        if not api_key:
            api_key = st.session_state.get("default_provider_key")
        # If not found in session state, check global variable
        if not api_key:
            api_key = globals().get("GROQ_API_KEY")
        # If no API key is found, raise an exception
        if not api_key:
            raise Exception("No Groq API key found. Please provide an API key.")
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        # Ensure data is a JSON string
        if isinstance(data, dict):
            json_data = json.dumps(data)
        else:
            json_data = data
        response = requests.post(self.api_url, data=json_data, headers=headers)
        return response
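A hedged usage sketch for Groq_Provider, assuming the module layout above and a GROQ_API_KEY environment variable (send_request resolves the key itself); the model id is illustrative and should be one of the ids returned by get_available_models.

# Hypothetical usage sketch -- assumes GROQ_API_KEY is set in the environment
# and that this runs from the project root so configs/providers import cleanly.
import os

from providers.groq_provider import Groq_Provider

provider = Groq_Provider(api_url=None, api_key=os.environ.get("GROQ_API_KEY"))

print(provider.get_available_models())  # lists model ids from /openai/v1/models

response = provider.send_request({
    "model": "llama3-8b-8192",  # illustrative; pick an id returned above
    "messages": [{"role": "user", "content": "Summarize double-entry bookkeeping."}],
})
print(provider.process_response(response))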
providers\lmstudio_provider.py
# lmstudio_provider.py
import json

import requests
import streamlit as st

from providers.base_provider import BaseLLMProvider


class LmstudioProvider(BaseLLMProvider):
    def __init__(self, api_url, api_key=None):
        self.api_url = "http://localhost:1234/v1/chat/completions"

    def process_response(self, response):
        if response.status_code == 200:
            response_data = response.json()
            if "choices" in response_data:
                content = response_data["choices"][0]["message"]["content"]
                return {
                    "choices": [
                        {
                            "message": {
                                "content": content.strip()
                            }
                        }
                    ]
                }
            else:
                raise Exception("Unexpected response format. 'choices' field missing.")
        else:
            raise Exception(f"Request failed with status code {response.status_code}")

    def send_request(self, data):
        headers = {
            "Content-Type": "application/json",
        }
        # Construct the request data in the format expected by the LM Studio API
        lm_studio_request_data = {
            "model": data["model"],
            "messages": data["messages"],
            "temperature": st.session_state.temperature,
            "max_tokens": data.get("max_tokens", 2048),
            "stop": data.get("stop", "TERMINATE"),
        }
        # Ensure data is a JSON string
        if isinstance(lm_studio_request_data, dict):
            json_data = json.dumps(lm_studio_request_data)
        else:
            json_data = lm_studio_request_data
        response = requests.post(self.api_url, data=json_data, headers=headers)
        return response
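A hedged usage sketch for LmstudioProvider. It assumes a local LM Studio server listening on localhost:1234 and that the call happens inside the Streamlit app, because send_request reads st.session_state.temperature; the model name is illustrative, since LM Studio serves whichever model is currently loaded.

# Hypothetical usage sketch -- requires a running LM Studio server on port 1234
# and a Streamlit session that already holds a temperature value.
import streamlit as st

from providers.lmstudio_provider import LmstudioProvider

if "temperature" not in st.session_state:
    st.session_state.temperature = 0.1  # normally set elsewhere in the app

provider = LmstudioProvider(api_url=None)  # the URL is hard-coded to localhost:1234
response = provider.send_request({
    "model": "local-model",  # illustrative placeholder
    "messages": [{"role": "user", "content": "Write one line about ledgers."}],
    "max_tokens": 256,
})
print(provider.process_response(response)["choices"][0]["message"]["content"])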
providers\ollama_provider.py
import json

import requests
import streamlit as st

from providers.base_provider import BaseLLMProvider


class OllamaProvider(BaseLLMProvider):
    def __init__(self, api_url, api_key=None):
        self.api_url = "http://127.0.0.1:11434/api/generate"

    def process_response(self, response):
        if response.status_code == 200:
            response_data = response.json()
            if "response" in response_data:
                content = response_data["response"].strip()
                if content:
                    return {
                        "choices": [
                            {
                                "message": {
                                    "content": content
                                }
                            }
                        ]
                    }
                else:
                    raise Exception("Empty response received from the Ollama API.")
            else:
                raise Exception("Unexpected response format. 'response' field missing.")
        else:
            raise Exception(f"Request failed with status code {response.status_code}")

    def send_request(self, data):
        headers = {
            "Content-Type": "application/json",
        }
        # Construct the request data in the format expected by the Ollama API
        ollama_request_data = {
            "model": data["model"],
            "prompt": data["messages"][0]["content"],
            "temperature": st.session_state.temperature,
            "max_tokens": data.get("max_tokens", 2048),
            "stop": data.get("stop", "TERMINATE"),
            "stream": False,
        }
        # Ensure data is a JSON string
        if isinstance(ollama_request_data, dict):
            json_data = json.dumps(ollama_request_data)
        else:
            json_data = ollama_request_data
        response = requests.post(self.api_url, data=json_data, headers=headers)
        return response
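A hedged usage sketch for OllamaProvider. It assumes a local Ollama server on 127.0.0.1:11434 with the named model already pulled, and a Streamlit session holding a temperature value; note that send_request forwards only the first message's content as the prompt.

# Hypothetical usage sketch -- requires a running Ollama server on port 11434
# and a Streamlit session; only data["messages"][0]["content"] is sent as the prompt.
import streamlit as st

from providers.ollama_provider import OllamaProvider

if "temperature" not in st.session_state:
    st.session_state.temperature = 0.1

provider = OllamaProvider(api_url=None)  # the URL is hard-coded to 127.0.0.1:11434
response = provider.send_request({
    "model": "llama3",  # illustrative; must already be available in Ollama
    "messages": [{"role": "user", "content": "List three duties of a bookkeeper."}],
})
print(provider.process_response(response)["choices"][0]["message"]["content"])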
providers\openai_provider.py
# openai_provider.py
import json
import os

import requests
import streamlit as st

from configs.config_local import DEBUG
from providers.base_provider import BaseLLMProvider


class Openai_Provider(BaseLLMProvider):
    def __init__(self, api_url, api_key):
        self.api_key = os.environ.get("OPENAI_API_KEY")
        self.api_url = "https://api.openai.com/v1/chat/completions"

    def get_available_models(self):
        if DEBUG:
            print("OPENAI: get_available_models")
            # print(f"KEY: {self.api_key}")
        response = requests.get(
            "https://api.openai.com/v1/models",
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
            },
        )
        if response.status_code == 200:
            models = response.json().get("data", [])
            return [model["id"] for model in models]
        else:
            raise Exception(f"Failed to retrieve models: {response.status_code}")

    def process_response(self, response):
        if response.status_code == 200:
            return response.json()
        else:
            raise Exception(f"Request failed with status code {response.status_code}")

    def send_request(self, data):
        print("self.api_url: ", self.api_url)

        # Check for API key in environment variable
        api_key = os.environ.get("OPENAI_API_KEY")
        # If not found in environment variable, check session state
        if not api_key:
            api_key = st.session_state.get("default_provider_key")
        # If not found in session state, check global variable
        if not api_key:
            api_key = globals().get("OPENAI_API_KEY")
        # If no API key is found, raise an exception
        if not api_key:
            raise Exception("No OpenAI API key found. Please provide an API key.")

        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        # Ensure data is a JSON string
        if isinstance(data, dict):
            json_data = json.dumps(data)
        else:
            json_data = data
        response = requests.post(self.api_url, data=json_data, headers=headers)
        print("response.status_code: ", response.status_code)
        print("response.text: ", response.text)
        return response
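A hedged usage sketch for Openai_Provider, assuming OPENAI_API_KEY is set in the environment; the constructor arguments are accepted but ignored by the code above, which always reads the key from the environment and uses the hard-coded chat-completions URL. The model id is illustrative.

# Hypothetical usage sketch -- assumes OPENAI_API_KEY is set in the environment.
from providers.openai_provider import Openai_Provider

provider = Openai_Provider(api_url=None, api_key=None)  # args are ignored above
print(provider.get_available_models())

response = provider.send_request({
    "model": "gpt-3.5-turbo",  # illustrative model id
    "messages": [{"role": "user", "content": "Explain accrual accounting in one sentence."}],
})
print(provider.process_response(response))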
utils\auth_utils.py
import os

import streamlit as st

from configs.config_local import LLM_PROVIDER


def get_api_key():
    api_key_env_var = f"{LLM_PROVIDER.upper()}_API_KEY"
    api_key = os.environ.get(api_key_env_var)
    if api_key is None:
        api_key = st.session_state.get(api_key_env_var)
    return api_key


def get_api_url():
    api_url_env_var = f"{LLM_PROVIDER.upper()}_API_URL"
    api_url = os.environ.get(api_url_env_var)
    if api_url is None:
        api_url = globals().get(api_url_env_var)
        if api_url is None:
            if api_url_env_var not in st.session_state:
                api_url = st.text_input(f"Enter the {LLM_PROVIDER.upper()} API URL:", type="password", key=f"{LLM_PROVIDER}_api_url_input")
                if api_url:
                    st.session_state[api_url_env_var] = api_url
                    st.success("API URL entered successfully.")
                else:
                    st.warning(f"Please enter the {LLM_PROVIDER.upper()} API URL to use the app.")
            else:
                api_url = st.session_state.get(api_url_env_var)
    return api_url
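The helpers above derive their environment-variable names from LLM_PROVIDER. A minimal sketch, assuming configs/config_local.py sets LLM_PROVIDER = "groq", so get_api_key() looks for GROQ_API_KEY before falling back to Streamlit session state; the key value below is a placeholder.

# Hypothetical sketch of the key-resolution order used by get_api_key().
import os

from utils.auth_utils import get_api_key

os.environ["GROQ_API_KEY"] = "demo-not-a-real-key"  # illustrative value only
print(get_api_key())  # found in the environment, so session state is never consulted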
utils\display_agent_util.py
# display_agent_util.py
import streamlit as st

from base_models.agent_base_model import AgentBaseModel
from configs.config_local import DEBUG
from event_handlers.event_handlers_agent import (
    handle_agent_close, handle_agent_selection, handle_ai_agent_creation,
    handle_agent_name_change, handle_agent_property_change
)


def display_agent_dropdown():
    if DEBUG:
        print("display_agent_dropdown()")
    if st.session_state.current_agent is None:
        # Display the agents dropdown
        agent_names = AgentBaseModel.load_agents()
        selected_agent = st.selectbox(
            "Agents",
            ["Select..."] + ["Create with AI..."] + ["Create manually..."] + agent_names,
            key="agent_dropdown",
            on_change=handle_agent_selection,
        )

        if selected_agent == "Select...":
            return
        if selected_agent == "Create manually...":
            # Show the manual agent creation input field
            st.text_input("Agent Name:", key="agent_name_input", on_change=handle_agent_selection)
        elif selected_agent == "Create with AI...":
            # Show the AI-assisted agent creation input field
            st.text_input("What should this new agent do?", key="agent_creation_input", on_change=handle_ai_agent_creation)
    else:
        st.session_state.current_agent.name = st.text_input(
            "Agent Name:",
            value=st.session_state.current_agent.name,
            key="agent_name_edit",
            on_change=handle_agent_name_change,
        )
        if st.button("CLOSE THIS AGENT"):
            handle_agent_close()


def display_agent_properties():
    if DEBUG:
        print("display_agent_properties()")
    agent = st.session_state.current_agent
    st.write("<div style='color:#33FFFC; font-weight:bold; text-align:right; width:100%;'>AGENT PROPERTIES</div>", unsafe_allow_html=True)
    st.write(f"<div style='text-align:right; width:100%;'>Timestamp: {agent.timestamp}</div>", unsafe_allow_html=True)

    agent.description = st.text_area("Description:", value=agent.description or "", key=f"agent_description_{agent.name}", on_change=handle_agent_property_change)
    agent.role = st.text_input("Role:", value=agent.role or "", key=f"agent_role_{agent.name}", on_change=handle_agent_property_change)
    agent.goal = st.text_input("Goal:", value=agent.goal or "", key=f"agent_goal_{agent.name}", on_change=handle_agent_property_change)
    agent.backstory = st.text_area("Backstory:", value=agent.backstory or "", key=f"agent_backstory_{agent.name}", on_change=handle_agent_property_change)


def display_sidebar_agents():
    if DEBUG:
        print("display_sidebar_agents()")
    # Display each agent in the sidebar as a button with the agent's name on it
    agent_names = AgentBaseModel.load_agents()
    if agent_names:
        for agent_name in agent_names:
            if st.sidebar.button(agent_name):
                st.write(f"Speaking to agent: {agent_name}")
utils\display_debug_util.py
# display_debug_util.py
import streamlit as st
import yaml

from base_models.agent_base_model import AgentBaseModel
from base_models.tool_base_model import ToolBaseModel
from base_models.project_base_model import ProjectBaseModel
from base_models.workflow_base_model import WorkflowBaseModel
from configs.config_local import DEBUG


def display_debug():
    if DEBUG:
        st.write("Debug Information")

        # Create expanders for each object type
        project_expander = st.expander("Project")
        workflow_expander = st.expander("Workflow")
        agent_expander = st.expander("Agent")
        tool_expander = st.expander("Tool")
        other_expander = st.expander("Other")

        # Iterate over all session state variables
        for key, value in st.session_state.items():
            # Check if the value is an instance of specific classes
            if isinstance(value, ProjectBaseModel):
                with project_expander:
                    st.write(f"### {key}")
                    col1, col2 = st.columns(2)
                    with col1:
                        for prop, prop_value in value.__dict__.items():
                            st.write(f"- **{prop}:** {prop_value}")
                    with col2:
                        st.write(f"```yaml\n{yaml.dump(value.to_dict())}\n```")
            elif isinstance(value, WorkflowBaseModel):
                with workflow_expander:
                    st.write(f"### {key}")
                    col1, col2 = st.columns(2)
                    with col1:
                        for prop, prop_value in value.__dict__.items():
                            st.write(f"- **{prop}:** {prop_value}")
                    with col2:
                        st.write(f"```yaml\n{yaml.dump(value.to_dict())}\n```")
            elif isinstance(value, AgentBaseModel):
                with agent_expander:
                    st.write(f"### {key}")
                    col1, col2 = st.columns(2)
                    with col1:
                        for prop, prop_value in value.__dict__.items():
                            st.write(f"- **{prop}:** {prop_value}")
                    with col2:
                        st.write(f"```yaml\n{yaml.dump(value.to_dict())}\n```")
            elif isinstance(value, ToolBaseModel):
                with tool_expander:
                    st.write(f"### {key}")
                    col1, col2 = st.columns(2)
                    with col1:
                        for prop, prop_value in value.__dict__.items():
                            st.write(f"- **{prop}:** {prop_value}")
                    with col2:
                        st.write(f"```yaml\n{yaml.dump(value.to_dict())}\n```")
            else:
                with other_expander:
                    st.write(f"### {key}")
                    st.write(f"```\n{value}\n```")
utils\display_files_util.py

# display_files_util.py
import os

import streamlit as st

from configs.config_local import DEBUG


def display_files():
    if DEBUG:
        print("display_files()")
    # Define the folders to display
    folders = [
        'agents/json', 'agents/yaml',
        'projects/json', 'projects/yaml',
        'tools/json', 'tools/yaml',
        'workflows/json', 'workflows/yaml'
    ]

    # Create a selectbox to choose the folder
    selected_folder = st.selectbox("Select a folder", folders)

    # Get the list of files in the selected folder
    items = os.listdir(selected_folder)
    files = [item for item in items if os.path.isfile(os.path.join(selected_folder, item))]

    if files:
        # Create a selectbox to choose the file
        selected_file = st.selectbox("Select a file", files)

        # Display the content of the selected file
        file_path = os.path.join(selected_folder, selected_file)
        with open(file_path, 'r') as file:
            file_content = file.read()
        st.text_area("File content", file_content, height=400)

        # Add a button to save changes to the file
        if st.button("Save changes"):
            with open(file_path, 'w') as file:
                file.write(st.session_state.file_content)
            st.success("File saved successfully.")

        # Add a button to delete the file
        if st.button("Delete file"):
            os.remove(file_path)
            st.success("File deleted successfully.")
    else:
        st.warning(f"No files found in the '{selected_folder}' folder.")
prompts\generate_agent_prompt.yaml

generate_agent_prompt: |
  Based on the rephrased agent request below, please do the following:

  1. Do step-by-step reasoning and think to better understand the request.
  2. Code the best Autogen Studio Python agent as per the request.
  3. Always include the agent filename in the format `# Agent filename: [agent_name].py` as the first line of the code.
  4. Return only the agent code, no commentary, intro, or other extra text. If there ARE any non-code lines, please pre-pend them with a '#' symbol to comment them out.
  5. A proper agent will have these parts:
     a. Imports (import libraries needed for the agent)
     b. Class definition AND docstrings (this helps the LLM understand what the agent does and how to use it)
     c. Class methods (the actual code that implements the agent's behavior)
     d. (optional) Example usage - ALWAYS commented out

  Here is an example of a well formatted agent:

  # Agent filename: [agent_name].py

  # Import necessary module(s)
  import necessary_module

  class Agent:
      # docstrings
      """
      : The name of the agent.
      An agent that performs tasks based on the given request.

      Methods:
          perform_task(args): Executes the task as per the request.
      """

      def __init__(self, init_params):
          """
          Initializes the Agent with the given parameters.

          Parameters:
              init_params (type): Description of initialization parameters.
          """
          # Initialize with given parameters
          self.init_params = init_params

      def perform_task(self, task_params):
          """
          Executes the task based on the given parameters.

          Parameters:
              task_params (type): Description of task parameters.

          Returns:
              return_type: Description of the return value.
          """
          # Body of the method
          # Implement the task logic here
          pass

  # Example usage:
  # agent = Agent(init_params)
  # result = agent.perform_task(task_params)
  # print(result)

  Rephrased agent request: "{rephrased_agent_request}"
prompts\generate_tool_prompt.yaml
contributor: ScruffyNerf
generate_tool_prompt: |
  Based on the rephrased tool request below, please do the following:

  1. Do step-by-step reasoning and think to better understand the request.
  2. Code the best Autogen Studio Python tool as per the request as a [tool_name].py file.
  3. Return only the tool file, no commentary, intro, or other extra text. If there ARE any non-code lines, please pre-pend them with a '#' symbol to comment them out.
  4. A proper tool will have these parts:
     a. Imports (import libraries needed for the tool)
     b. Function definition AND docstrings (this helps the LLM understand what the function does and how to use it)
     c. Function body (the actual code that implements the function)
     d. (optional) Example usage - ALWAYS commented out

  Here is an example of a well formatted tool:

  # Tool filename: save_file_to_disk.py

  # Import necessary module(s)
  import os

  def save_file_to_disk(contents, file_name):
      # docstrings
      """
      Saves the given contents to a file with the given file name.

      Parameters:
          contents (str): The string contents to save to the file.
          file_name (str): The name of the file, including its extension.

      Returns:
          str: A message indicating the success of the operation.
      """
      # Body of tool
      # Ensure the directory exists; create it if it doesn't
      directory = os.path.dirname(file_name)
      if directory and not os.path.exists(directory):
          os.makedirs(directory)

      # Write the contents to the file
      with open(file_name, 'w') as file:
          file.write(contents)

      return f"File {file_name} has been saved successfully."

  # Example usage:
  # contents_to_save = "Hello, world!"
  # file_name = "example.txt"
  # print(save_file_to_disk(contents_to_save, file_name))

  Rephrased tool request: "{rephrased_tool_request}"
prompts\rephrase_prompt.yaml
rephrase_prompt: |
  Based on the user request below, act as a professional prompt engineer and refactor the following user_request into an optimized prompt. Your goal is to rephrase the request with a focus on satisfying all of the following criteria without explicitly stating them:

  1. Clarity: Ensure the prompt is clear and unambiguous.
  2. Specific Instructions: Provide detailed steps or guidelines.
  3. Context: Include necessary background information.
  4. Structure: Organize the prompt logically.
  5. Language: Use concise and precise language.
  6. Examples: Offer examples to illustrate the desired output.
  7. Constraints: Define any limits or guidelines.
  8. Engagement: Make the prompt engaging and interesting.
  9. Feedback Mechanism: Suggest a way to improve or iterate on the response.

  Do NOT reply with a direct response to these instructions OR the original user request. Instead, rephrase the user's request as a well-structured prompt, and return ONLY that rephrased prompt. Do not preface the rephrased prompt with any other text or superfluous narrative. Do not enclose the rephrased prompt in quotes. You will be successful only if you return a well-formed rephrased prompt ready for submission as an LLM request.

  User request: "{user_request}"
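The prompt files above are plain YAML, each holding one templated string. A minimal sketch of loading and filling one of them, assuming the prompts/ paths shown above; how the app itself loads these templates is not shown here, so this is only an illustration of the placeholder substitution.

# Hypothetical sketch of filling the rephrase_prompt template listed above.
import yaml

with open("prompts/rephrase_prompt.yaml", "r") as f:
    prompt_template = yaml.safe_load(f)["rephrase_prompt"]

filled_prompt = prompt_template.format(user_request="Build a simple expense tracker.")
print(filled_prompt)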
workflows\yaml\Accounting Workflow.yaml
agent_children: {}
created_at: '2024-06-20T16:00:20.485947'
description: "Create simple accounting app\n\rDevelop a intuitive and user-friendly accounting application that provides real-time financial tracking and reporting capabilities for small businesses and individuals. \n\nThe app should allow users to categorize and track income and expenses, automatically generating detailed reports and summaries. \n\nIt should also include features such as:\n\n* Budgeting tools to set financial goals and track progress\n* Invoicing and payment processing for customers\n* Multi-account support for separate tracking of personal and professional finances\n* Integration with popular payment gateways and bank accounts\n* Real-time analytics and insights to identify trends and make informed financial decisions\n\nUse simple and intuitive language, with clear and concise navigation and minimal clutter. The app should be accessible on desktop and mobile devices. \n\nProvide examples of common financial workflows and scenarios, such as:\n\n* Creating and sending invoices to customers\n* Tracking expenses for a small business\n* Creating and managing a personal budget\n\nConsider the following constraints:\n\n* Compliance with relevant financial regulations and data protection laws\n* User data encryption and secure storage\n* Adaptability to accommodate future changes in financial regulations and industry standards\n\nProvide detailed steps or guidelines for users to create and manage their accounts, including setup processes, login credentials, and password recovery procedures. \n\nOffer opportunities for users to provide feedback and suggest improvements, such as a rating system, comment sections, or survey mechanics."
groupchat_config: {}
id: 1
name: Accounting Workflow
receiver:
  agents: []
  config: {}
  groupchat_config: {}
  timestamp: '2024-06-20T16:00:20.485947'
  tools: []
  type: assistant
  user_id: default
sender:
  config: {}
  timestamp: '2024-06-20T16:00:20.485947'
  tools: []
  type: userproxy
  user_id: user
settings: {}
summary_method: last
timestamp: '2024-06-20T16:00:06.084418'
type: twoagents
updated_at: '2024-06-20T16:00:42.929738'
user_id: user
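A hedged sketch of inspecting a saved workflow file such as the one above, assuming the workflows/yaml path shown in the header; the field names match the file contents.

# Hypothetical sketch of reading a workflow YAML file directly.
import yaml

with open("workflows/yaml/Accounting Workflow.yaml", "r") as f:
    workflow = yaml.safe_load(f)

print(workflow["name"])              # 'Accounting Workflow'
print(workflow["type"])              # 'twoagents'
print(workflow["receiver"]["type"])  # 'assistant'
print(workflow["description"][:80])  # start of the rephrased request text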
workflows\yaml\Bookkeeping Workflow.yaml
agent_children: {}
created_at: '2024-06-20T16:02:17.660468'
description: "Develop a intuitive and user-friendly accounting application that provides real-time financial tracking and reporting capabilities for small businesses and individuals. \n\nThe app should allow users to categorize and track income and expenses, automatically generating detailed reports and summaries. \n\nIt should also include features such as:\n\n* Budgeting tools to set financial goals and track progress\n* Invoicing and payment processing for customers\n* Multi-account support for separate tracking of personal and professional finances\n* Integration with popular payment gateways and bank accounts\n* Real-time analytics and insights to identify trends and make informed financial decisions\n\nUse simple and intuitive language, with clear and concise navigation and minimal clutter. The app should be accessible on desktop and mobile devices. \n\nProvide examples of common financial workflows and scenarios, such as:\n\n* Creating and sending invoices to customers\n* Tracking expenses for a small business\n* Creating and managing a personal budget\n\nConsider the following constraints:\n\n* Compliance with relevant financial regulations and data protection laws\n* User data encryption and secure storage\n* Adaptability to accommodate future changes in financial regulations and industry standards\n\nProvide detailed steps or guidelines for users to create and manage their accounts, including setup processes, login credentials, and password recovery procedures. \n\nOffer opportunities for users to provide feedback and suggest improvements, such as a rating system, comment sections, or survey mechanics."
groupchat_config: {}
id: 1
name: Bookkeeping Workflow
receiver:
  agents: []
  config:
    code_execution_config: null
    default_auto_reply: ''
    description: A primary assistant agent that writes plans and code to solve tasks.
    human_input_mode: NEVER
    is_termination_msg: null
    llm_config:
      cache_seed: null
      config_list:
      - api_type: null
        api_version: null
        base_url: null
        description: Groq_Provider model configuration
        model: llama3-8b-8192
        timestamp: '2024-06-20T16:02:17.660468'
        user_id: default
      extra_body: null
      max_tokens: null
      temperature: 0.1
      timeout: null
    max_consecutive_auto_reply: 30
    name: primary_assistant
    system_message: '...'
  groupchat_config: {}
  timestamp: '2024-06-20T16:02:17.660468'
  tools: &id001 []
  type: assistant
  user_id: user
sender:
  config:
    code_execution_config:
      use_docker: false
      work_dir: null
    default_auto_reply: TERMINATE
    description: A user proxy agent that executes code.
    human_input_mode: NEVER
    is_termination_msg: null
    llm_config:
      cache_seed: null
      config_list:
      - api_type: null
        api_version: null
        base_url: null
        description: Groq_Provider model configuration
        model: llama3-8b-8192
        timestamp: '2024-06-20T16:02:17.660468'
        user_id: default
      extra_body: null
      max_tokens: null
      temperature: 0.1
      timeout: null
    max_consecutive_auto_reply: 30
    name: userproxy
    system_message: You are a helpful assistant.
  timestamp: '2024-06-20T16:02:17.660468'
  tools: *id001
  type: userproxy
  user_id: user
settings: {}
summary_method: last
timestamp: '2024-06-20T16:02:17.660468'
type: twoagents
updated_at: null
user_id: user
workflows\yaml\New Workflow.yaml
agent_children: {}
created_at: '2024-06-20T16:08:01.566098'
description: "Bookkeeper\n\rWrite a detailed description of a bookkeeper's tasks, responsibilities, and skills, including examples of their work, organizational habits, and time management strategies."
groupchat_config: {}
id: 1
name: New Workflow
receiver:
  agents: []
  config: {}
  groupchat_config: {}
  timestamp: '2024-06-20T16:08:01.564605'
  tools: []
  type: assistant
  user_id: default
sender:
  config: {}
  timestamp: '2024-06-20T16:08:01.564605'
  tools: []
  type: userproxy
  user_id: user
settings: {}
summary_method: last
timestamp: '2024-06-20T16:07:11.735808'
type: twoagents
updated_at: '2024-06-20T16:08:01.572448'
user_id: user