
Commit

Fix query input and some of the prompt messages
kaqijiang committed Apr 22, 2023
1 parent 369f08f commit ed2ff2b
Showing 13 changed files with 135 additions and 128 deletions.
168 changes: 89 additions & 79 deletions .env.template
@@ -1,88 +1,97 @@
# Search for OPENAI_API_KEY - only OPENAI_API_KEY needs to be set to get started
# OPENAI_API_KEY=your-openai-api-key


################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
# EXECUTE_LOCAL_COMMANDS=False
# RESTRICT_TO_WORKSPACE=True
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
EXECUTE_LOCAL_COMMANDS=False

## USER_AGENT - Define the user-agent used by the requests library to browse website (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# RESTRICT_TO_WORKSPACE - Restrict file operations to the workspace ./auto_gpt_workspace (Default: True)
RESTRICT_TO_WORKSPACE=True

## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
# AI_SETTINGS_FILE=ai_settings.yaml
# USER_AGENT - Define the user agent used by the requests library to browse websites (string)
USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"

# AI_SETTINGS_FILE - Specifies which AI settings file to use (Default: ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
## USE_AZURE - Use Azure OpenAI or not (Default: False)
## OPENAI_API_KEY - OpenAI API key (Example: my-openai-api-key)
OPENAI_API_KEY=your-openai-api-key
# TEMPERATURE=0
# USE_AZURE=False

## TEMPERATURE - Sets the OpenAI temperature (Default: 0). 0 gives the most deterministic answers; 1 returns more varied output
TEMPERATURE=0

## USE_AZURE - Whether to use Azure OpenAI (Default: False)
USE_AZURE=False

### AZURE
# moved to `azure.yaml.template`

################################################################################
### LLM MODELS
################################################################################

## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
# SMART_LLM_MODEL=gpt-4
# FAST_LLM_MODEL=gpt-3.5-turbo
## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo

### LLM MODEL SETTINGS
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000.
# FAST_TOKEN_LIMIT=4000
# SMART_TOKEN_LIMIT=8000
### LLM MODEL SETTINGS
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
## When using --gpt3only this needs to be set to 4000
FAST_TOKEN_LIMIT=4000
SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

### MEMORY_BACKEND - Memory backend type
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured)
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
# MEMORY_BACKEND=local
# MEMORY_INDEX=auto-gpt
### MEMORY_BACKEND - Memory backend type
## local - Default
## pinecone - Pinecone (if configured)
## redis - Redis (if configured)
## milvus - Milvus (if configured)
## MEMORY_INDEX - Name of the index created in the memory backend (Default: auto-gpt)

MEMORY_BACKEND=local
MEMORY_INDEX=auto-gpt

### PINECONE
## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
## PINECONE_API_KEY - Pinecone API key (Example: my-pinecone-api-key)
## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
# PINECONE_API_KEY=your-pinecone-api-key
# PINECONE_ENV=your-pinecone-region

### REDIS
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
## REDIS_PORT - Redis port (Default: 6379)
## REDIS_PASSWORD - Redis password (Default: "")
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
## REDIS_PORT - Redis port (Default: 6379)
## REDIS_PASSWORD - Redis password (Default: "")
## WIPE_REDIS_ON_START - Wipe data/index on start (Default: True)
# REDIS_HOST=localhost
# REDIS_PORT=6379
# REDIS_PASSWORD=
# WIPE_REDIS_ON_START=True

### WEAVIATE
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path where data is persisted when running Embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
## WEAVIATE_HOST - Weaviate host IP
## WEAVIATE_PORT - Weaviate host port
## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
## USE_WEAVIATE_EMBEDDED - Whether to use embedded Weaviate
## WEAVIATE_EMBEDDED_PATH - File system path where data is persisted when running embedded Weaviate
## WEAVIATE_USERNAME - Weaviate username
## WEAVIATE_PASSWORD - Weaviate password
## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication

# WEAVIATE_HOST="127.0.0.1"
# WEAVIATE_PORT=8080
# WEAVIATE_PROTOCOL="http"
@@ -93,89 +102,90 @@ OPENAI_API_KEY=your-openai-api-key
# WEAVIATE_API_KEY=

### MILVUS
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
## MILVUS_COLLECTION - Milvus collection,
## change it if you want to start a new memory and retain the old memory.
## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
## MILVUS_COLLECTION - Milvus collection,
## change it if you want to start a new memory and retain the old memory.
# MILVUS_ADDR=your-milvus-cluster-host-port
# MILVUS_COLLECTION=autogpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### OPEN AI
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## IMAGE_PROVIDER - Image provider (Example: dalle)
## IMAGE_SIZE - Image size (Example: 256)
## DALLE: 256, 512, 1024
# IMAGE_PROVIDER=dalle
# IMAGE_SIZE=256

### HUGGINGFACE
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from HuggingFace (Default: CompVis/stable-diffusion-v1-4)
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token

### STABLE DIFFUSION WEBUI
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
## SD_WEBUI_AUTH - Stable Diffusion webui username:password pair (Example: username:password)
## SD_WEBUI_URL - Stable Diffusion webui API URL (Example: http://127.0.0.1:7860)
# SD_WEBUI_AUTH=
# SD_WEBUI_URL=http://127.0.0.1:7860

################################################################################
### AUDIO TO TEXT PROVIDER
################################################################################

### HUGGINGFACE
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h

################################################################################
### GIT Provider for repository actions
################################################################################

### GITHUB
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - Github username
## GITHUB_API_KEY - GitHub API key / PAT (Example: github_pat_123)
## GITHUB_USERNAME - GitHub username
# GITHUB_API_KEY=github_pat_123
# GITHUB_USERNAME=your-github-username

################################################################################
### WEB BROWSING
################################################################################

### BROWSER
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
## Note: set this to either 'chrome', 'firefox', or 'safari' depending on your current browser
# HEADLESS_BROWSER=True
# USE_WEB_BROWSER=chrome
## BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunks to summarize (in number of tokens, excluding the response. 75 % of FAST_TOKEN_LIMIT is usually wise )
## HEADLESS_BROWSER - Whether to run the browser in headless mode (Default: True)
## USE_WEB_BROWSER - Sets the web browser driver to use with Selenium (Default: chrome)
## Note: set this to 'chrome', 'firefox', or 'safari' depending on your current browser
HEADLESS_BROWSER=False
USE_WEB_BROWSER=chrome

## BROWSE_CHUNK_MAX_LENGTH - When browsing websites, the length of the chunks to summarize (in number of tokens, excluding the response; 75% of FAST_TOKEN_LIMIT is usually wise)
# BROWSE_CHUNK_MAX_LENGTH=3000
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip and set the model name here. Example (Chinese): python -m spacy download zh_core_web_sm
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm

### GOOGLE
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
# GOOGLE_API_KEY=your-google-api-key
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TTS PROVIDER
################################################################################

### MAC OS
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
## USE_MAC_OS_TTS - Whether to use Mac OS TTS (Default: False)
# USE_MAC_OS_TTS=False

### STREAMELEMENTS
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
## USE_BRIAN_TTS - Whether to use Brian TTS (Default: False)
# USE_BRIAN_TTS=False

### ELEVENLABS
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
# ELEVENLABS_VOICE_2_ID=your-voice-id-2
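As context for the template above, Auto-GPT reads these values from the environment at startup. The sketch below shows only the general mechanism; the variable names and defaults are assumptions for illustration, not fields copied from the project's config.py.

# Illustrative only: how .env values like the ones above are typically consumed.
# Assumes python-dotenv is available; attribute names and defaults are hypothetical.
import os
from dotenv import load_dotenv

load_dotenv(".env")  # values take effect once .env.template is copied to .env

execute_local_commands = os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
temperature = float(os.getenv("TEMPERATURE", "0"))

print(execute_local_commands, fast_llm_model, temperature)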
4 changes: 2 additions & 2 deletions autogpt/agent/agent.py
@@ -116,7 +116,7 @@ def start_interaction_loop(self):
user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().strip() == "":
print("Invalid input format.")
print("输入格式无效。")
continue
elif console_input.lower().startswith("y -"):
try:
@@ -160,7 +160,7 @@ def start_interaction_loop(self):
f"Command {command_name} 抛出以下错误: {arguments}"
)
elif command_name == "human_feedback":
result = f"Human feedback: {user_input}"
result = f"人工反馈: {user_input}"
else:
result = (
f"Command {command_name} returned: "
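For readers skimming the hunk above: the interaction loop accepts "y" to authorise the next command, "y -N" to authorise N consecutive commands, "n" to exit, an empty line as invalid input, and any other text as human feedback. The standalone sketch below restates that parsing under those assumptions; names and messages are illustrative, not copied from agent.py.

# Hypothetical restatement of the console-input handling, for illustration only.
def parse_console_input(console_input: str):
    text = console_input.lower().strip()
    if text == "y":
        return ("authorise", 1)  # run the next command once
    if text.startswith("y -"):
        try:
            return ("authorise", abs(int(text.split(" ")[1])))  # e.g. "y -5" runs five commands
        except (IndexError, ValueError):
            return ("invalid", 0)  # malformed count
    if text == "n":
        return ("exit", 0)
    if text == "":
        return ("invalid", 0)  # corresponds to the "Invalid input format." branch above
    return ("human_feedback", console_input)

print(parse_console_input("y -3"))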
4 changes: 2 additions & 2 deletions autogpt/app.py
@@ -126,10 +126,10 @@ def execute_command(command_name: str, arguments):
# search method
key = CFG.google_api_key
if key and key.strip() and key != "your-google-api-key":
google_result = google_official_search(arguments["query"])
google_result = google_official_search(arguments["input"])
return google_result
else:
google_result = google_search(arguments["query"])
google_result = google_search(arguments["input"])

# google_result can be a list or a string depending on the search results
if isinstance(google_result, list):
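The change above is the "query input" fix from the commit title: execute_command now reads arguments["input"] instead of arguments["query"], presumably because the command JSON generated for the google command carries the search text under the "input" key, so the old lookup failed. A minimal sketch of that assumption follows; the payload shape and helper name are hypothetical, not taken from app.py.

# Hypothetical illustration of the dispatch fix; the payload shown is an assumed example.
def run_google_command(arguments: dict) -> str:
    # The agent's command JSON is assumed to look like:
    #   {"command": {"name": "google", "args": {"input": "auto-gpt zh"}}}
    # so the handler reads the "input" key rather than "query".
    search_text = arguments.get("input") or arguments.get("query", "")
    if not search_text:
        return "Error: missing search text"
    return f"searching for: {search_text}"

print(run_google_command({"input": "auto-gpt zh"}))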
20 changes: 10 additions & 10 deletions autogpt/cli.py
@@ -103,16 +103,16 @@ def main(
logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
ai_name = ""
if not cfg.skip_news:
motd = get_latest_bulletin()
if motd:
logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
# motd = get_latest_bulletin()
# if motd:
# logger.typewriter_log("NEWS: ", Fore.GREEN, motd)
git_branch = get_current_git_branch()
if git_branch and git_branch != "stable":
logger.typewriter_log(
"警告:",
Fore.RED,
f"您正在运行 {git_branch} 分支 - 这不是受支持的分支。",
)
# if git_branch and git_branch != "stable":
# logger.typewriter_log(
# "警告:",
# Fore.RED,
# f"您正在运行 {git_branch} 分支 - 这不是受支持的分支。",
# )
if sys.version_info < (3, 10):
logger.typewriter_log(
"警告:",
@@ -134,7 +134,7 @@
logger.typewriter_log(
"使用存储类型:", Fore.GREEN, f"{memory.__class__.__name__}"
)
logger.typewriter_log("使用浏览器r:", Fore.GREEN, cfg.selenium_web_browser)
logger.typewriter_log("使用浏览器:", Fore.GREEN, cfg.selenium_web_browser)
agent = Agent(
ai_name=ai_name,
memory=memory,
16 changes: 8 additions & 8 deletions autogpt/commands/file_operations.py
@@ -144,7 +144,7 @@ def write_to_file(filename: str, text: str) -> str:
str: A message indicating success or failure
"""
if check_duplicate_operation("write", filename):
return "Error: File has already been updated."
return "Error: 文件已经被更新."
try:
filepath = path_in_workspace(filename)
directory = os.path.dirname(filepath)
@@ -153,7 +153,7 @@ def write_to_file(filename: str, text: str) -> str:
with open(filepath, "w", encoding="utf-8") as f:
f.write(text)
log_operation("write", filename)
return "File written to successfully."
return "文件已成功写入."
except Exception as e:
return f"Error: {str(e)}"

@@ -176,7 +176,7 @@ def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
if shouldLog:
log_operation("append", filename)

return "Text appended successfully."
return "文本已成功追加."
except Exception as e:
return f"Error: {str(e)}"

@@ -191,12 +191,12 @@ def delete_file(filename: str) -> str:
str: A message indicating success or failure
"""
if check_duplicate_operation("delete", filename):
return "Error: File has already been deleted."
return "Error: 文件已被删除."
try:
filepath = path_in_workspace(filename)
os.remove(filepath)
log_operation("delete", filename)
return "File deleted successfully."
return "文件已成功删除."
except Exception as e:
return f"Error: {str(e)}"

@@ -235,7 +235,7 @@ def download_file(url, filename):
"""
safe_filename = path_in_workspace(filename)
try:
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
message = f"{Fore.YELLOW}下载文件: {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
with Spinner(message) as spinner:
session = requests.Session()
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
@@ -260,8 +260,8 @@ def download_file(url, filename):
progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
spinner.update_message(f"{message} {progress}")

return f'Successfully downloaded and locally stored file: "{filename}"! (Size: {readable_file_size(total_size)})'
return f'Successfully downloaded and locally stored file: "{filename}"! (File size: {readable_file_size(total_size)})'
except requests.HTTPError as e:
return f"Got an HTTP Error whilst trying to download file: {e}"
return f"尝试下载文件时发生HTTP错误: {e}"
except Exception as e:
return "Error: " + str(e)