
Commit

chore: merge latest code
fangyinc committed Dec 22, 2023
2 parents (1fe7e9b + d906522) · commit aea528b
Showing 21 changed files with 1,461 additions and 709 deletions.
README.md (3 changes: 2 additions & 1 deletion)
@@ -103,7 +103,8 @@ At present, we have introduced several key features to showcase our current capabilities
 We offer extensive model support, including dozens of large language models (LLMs) from both open-source and API agents, such as LLaMA/LLaMA2, Baichuan, ChatGLM, Wenxin, Tongyi, Zhipu, and many more.

 - News
-  - 🔥🔥🔥 [qwen-72b-chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
+  - 🔥🔥🔥 [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
+  - 🔥🔥🔥 [Qwen-72B-Chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
   - 🔥🔥🔥 [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat)
   - [More Supported LLMs](http://docs.dbgpt.site/docs/modules/smmf)

README.zh.md (3 changes: 2 additions & 1 deletion)
@@ -111,7 +111,8 @@ DB-GPT is an open-source large-model framework for the database domain. Its purpose is to build large-model…
 Extensive model support, covering dozens of large language models from open-source and API-proxy sources, such as LLaMA/LLaMA2, Baichuan, ChatGLM, Wenxin, Tongyi, Zhipu, and more. The following models are currently supported:

 - Newly supported models
-  - 🔥🔥🔥 [qwen-72b-chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
+  - 🔥🔥🔥 [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
+  - 🔥🔥🔥 [Qwen-72B-Chat](https://huggingface.co/Qwen/Qwen-72B-Chat)
   - 🔥🔥🔥 [Yi-34B-Chat](https://huggingface.co/01-ai/Yi-34B-Chat)
   - [More open-source models](https://www.yuque.com/eosphoros/dbgpt-docs/iqaaqwriwhp6zslc#qQktR)

dbgpt/app/chat_adapter.py (2 changes: 1 addition & 1 deletion)
@@ -245,7 +245,7 @@ def get_conv_template(self, model_path: str) -> Conversation:

 class LlamaCppChatAdapter(BaseChatAdpter):
     def match(self, model_path: str):
-        from dbgpt.model.adapter import LlamaCppAdapater
+        from dbgpt.model.adapter.old_adapter import LlamaCppAdapater

         if "llama-cpp" == model_path:
             return True
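For readers unfamiliar with the adapter layer this import feeds into, the surrounding class follows a simple match-then-dispatch pattern: each chat adapter reports whether it can handle a given model path, and the first match wins. Below is a minimal, self-contained sketch of that pattern; the registry and helper names are illustrative rather than the project's actual API, and only the `"llama-cpp"` comparison mirrors the hunk above.

```python
from typing import List, Optional


class BaseChatAdapter:
    """Illustrative base class; the real project defines its own hierarchy."""

    def match(self, model_path: str) -> bool:
        raise NotImplementedError


class LlamaCppChatAdapter(BaseChatAdapter):
    def match(self, model_path: str) -> bool:
        # The equality check mirrors the hunk above; the extension check is an
        # added assumption, shown only for illustration.
        return model_path == "llama-cpp" or model_path.endswith(".gguf")


_ADAPTERS: List[BaseChatAdapter] = [LlamaCppChatAdapter()]


def get_chat_adapter(model_path: str) -> Optional[BaseChatAdapter]:
    """Return the first registered adapter whose match() accepts the path."""
    for adapter in _ADAPTERS:
        if adapter.match(model_path):
            return adapter
    return None


print(get_chat_adapter("llama-cpp"))       # -> LlamaCppChatAdapter instance
print(get_chat_adapter("unknown-model"))   # -> None
```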
dbgpt/configs/model_config.py (8 changes: 7 additions & 1 deletion)
@@ -114,7 +114,9 @@ def get_device() -> str:
     # https://huggingface.co/microsoft/Orca-2-13b
     "orca-2-13b": os.path.join(MODEL_PATH, "Orca-2-13b"),
     # https://huggingface.co/openchat/openchat_3.5
-    "openchat_3.5": os.path.join(MODEL_PATH, "openchat_3.5"),
+    "openchat-3.5": os.path.join(MODEL_PATH, "openchat_3.5"),
+    # https://huggingface.co/openchat/openchat-3.5-1210
+    "openchat-3.5-1210": os.path.join(MODEL_PATH, "openchat-3.5-1210"),
     # https://huggingface.co/hfl/chinese-alpaca-2-7b
     "chinese-alpaca-2-7b": os.path.join(MODEL_PATH, "chinese-alpaca-2-7b"),
     # https://huggingface.co/hfl/chinese-alpaca-2-13b
@@ -125,6 +127,10 @@ def get_device() -> str:
     "zephyr-7b-alpha": os.path.join(MODEL_PATH, "zephyr-7b-alpha"),
     # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
     "mistral-7b-instruct-v0.1": os.path.join(MODEL_PATH, "Mistral-7B-Instruct-v0.1"),
+    # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+    "mixtral-8x7b-instruct-v0.1": os.path.join(
+        MODEL_PATH, "Mixtral-8x7B-Instruct-v0.1"
+    ),
     # https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca
     "mistral-7b-openorca": os.path.join(MODEL_PATH, "Mistral-7B-OpenOrca"),
     # https://huggingface.co/Xwin-LM/Xwin-LM-7B-V0.1
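The entries above extend a plain name-to-local-path mapping, so registering a new model is a one-line dictionary addition and resolving it is a lookup. Here is a minimal sketch of that lookup, assuming the mapping is exposed as a module-level dict; the `resolve_model_path` helper and the `MODEL_PATH` value are hypothetical and shown only to illustrate how the entries are consumed.

```python
import os

MODEL_PATH = "./models"  # assumed base directory, for illustration only

# Excerpt mirroring the entries added in this hunk.
LLM_MODEL_CONFIG = {
    "openchat-3.5": os.path.join(MODEL_PATH, "openchat_3.5"),
    "openchat-3.5-1210": os.path.join(MODEL_PATH, "openchat-3.5-1210"),
    "mixtral-8x7b-instruct-v0.1": os.path.join(MODEL_PATH, "Mixtral-8x7B-Instruct-v0.1"),
}


def resolve_model_path(model_name: str) -> str:
    """Hypothetical helper: map a registered model name to its weights directory."""
    try:
        return LLM_MODEL_CONFIG[model_name]
    except KeyError:
        known = ", ".join(sorted(LLM_MODEL_CONFIG))
        raise ValueError(f"Unknown model {model_name!r}; known models: {known}") from None


print(resolve_model_path("mixtral-8x7b-instruct-v0.1"))  # ./models/Mixtral-8x7B-Instruct-v0.1
```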
dbgpt/core/interface/message.py (13 changes: 6 additions & 7 deletions; file mode changed 100644 → 100755)
@@ -157,14 +157,13 @@ def to_openai_messages(messages: List["ModelMessage"]) -> List[Dict[str, str]]:
             else:
                 pass
         # Move the last user's information to the end
-        temp_his = history[::-1]
-        last_user_input = None
-        for m in temp_his:
-            if m["role"] == "user":
-                last_user_input = m
+        last_user_input_index = None
+        for i in range(len(history) - 1, -1, -1):
+            if history[i]["role"] == "user":
+                last_user_input_index = i
                 break
-        if last_user_input:
-            history.remove(last_user_input)
+        if last_user_input_index:
+            last_user_input = history.pop(last_user_input_index)
         history.append(last_user_input)
         return history

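In plain terms, the rewritten block scans the history from the end for the most recent user message and moves it to the tail, using an index-based pop instead of removing by value (which could strike the wrong element when the same message appears twice). The following standalone sketch shows that reordering; the function name is illustrative, and the guard is written here as `is not None` so a user message sitting at index 0 is not skipped by a falsy-zero check.

```python
from typing import Dict, List


def move_last_user_message_to_end(history: List[Dict[str, str]]) -> List[Dict[str, str]]:
    """Move the most recent 'user' entry to the end of the history, in place."""
    last_user_input_index = None
    for i in range(len(history) - 1, -1, -1):
        if history[i]["role"] == "user":
            last_user_input_index = i
            break
    # `is not None` keeps index 0 valid; a bare truthiness test would treat it as "not found".
    if last_user_input_index is not None:
        history.append(history.pop(last_user_input_index))
    return history


messages = [
    {"role": "system", "content": "You are a helpful chatbot!"},
    {"role": "user", "content": "Hello"},
    {"role": "user", "content": "Hello again"},
    {"role": "assistant", "content": "Hi there"},
]
print(move_last_user_message_to_end(messages))
# [system, user "Hello", assistant, user "Hello again"]
```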
dbgpt/core/interface/tests/test_message.py (57 changes: 57 additions & 0 deletions; file mode changed 100644 → 100755)
@@ -67,6 +67,23 @@ def conversation_with_messages():
     return conv


+@pytest.fixture
+def human_model_message():
+    return ModelMessage(role=ModelMessageRoleType.HUMAN, content="Hello")
+
+
+@pytest.fixture
+def ai_model_message():
+    return ModelMessage(role=ModelMessageRoleType.AI, content="Hi there")
+
+
+@pytest.fixture
+def system_model_message():
+    return ModelMessage(
+        role=ModelMessageRoleType.SYSTEM, content="You are a helpful chatbot!"
+    )
+
+
 def test_init(basic_conversation):
     assert basic_conversation.chat_mode == "chat_normal"
     assert basic_conversation.user_name == "user1"
@@ -370,3 +387,43 @@ def test_parse_model_messages_multiple_system_messages():
     assert user_prompt == "How are you?"
     assert system_messages == ["System start", "System check"]
     assert history_messages == [["Hey", "Hello!"]]
+
+
+def test_to_openai_messages(
+    human_model_message, ai_model_message, system_model_message
+):
+    none_messages = ModelMessage.to_openai_messages([])
+    assert none_messages == []
+
+    single_messages = ModelMessage.to_openai_messages([human_model_message])
+    assert single_messages == [{"role": "user", "content": human_model_message.content}]
+
+    normal_messages = ModelMessage.to_openai_messages(
+        [
+            system_model_message,
+            human_model_message,
+            ai_model_message,
+            human_model_message,
+        ]
+    )
+    assert normal_messages == [
+        {"role": "system", "content": system_model_message.content},
+        {"role": "user", "content": human_model_message.content},
+        {"role": "assistant", "content": ai_model_message.content},
+        {"role": "user", "content": human_model_message.content},
+    ]
+
+    shuffle_messages = ModelMessage.to_openai_messages(
+        [
+            system_model_message,
+            human_model_message,
+            human_model_message,
+            ai_model_message,
+        ]
+    )
+    assert shuffle_messages == [
+        {"role": "system", "content": system_model_message.content},
+        {"role": "user", "content": human_model_message.content},
+        {"role": "assistant", "content": ai_model_message.content},
+        {"role": "user", "content": human_model_message.content},
+    ]
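The expected values in these tests are plain OpenAI-style role/content dicts, so a converted history can be handed directly to any OpenAI-compatible endpoint. A hedged usage sketch follows; the `openai` client calls are standard for that package, but the base URL, API key, and model name are placeholders rather than anything defined in this change.

```python
# Illustrative only: send a converted history to an OpenAI-compatible endpoint.
# Requires the `openai` package; base_url, api_key, and model are placeholders.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8100/api/v1", api_key="EMPTY")

openai_messages = [
    {"role": "system", "content": "You are a helpful chatbot!"},
    {"role": "user", "content": "Hello"},
]

response = client.chat.completions.create(
    model="mixtral-8x7b-instruct-v0.1",
    messages=openai_messages,
)
print(response.choices[0].message.content)
```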
dbgpt/model/adapter/__init__.py (empty file added)
(Diffs for the remaining 14 changed files not shown.)

