From 550a131685fe8d8dae76e1eeb26afed9f704dfbf Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sun, 17 Mar 2024 12:03:25 -0400 Subject: [PATCH 1/4] =?UTF-8?q?deps:=20=E6=B7=BB=E5=8A=A0=20anthropic=20?= =?UTF-8?q?=E4=BE=9D=E8=B5=96=E5=BA=93?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/core/bootutils/deps.py | 1 + requirements.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/pkg/core/bootutils/deps.py b/pkg/core/bootutils/deps.py index be54c2ff..41097f27 100644 --- a/pkg/core/bootutils/deps.py +++ b/pkg/core/bootutils/deps.py @@ -3,6 +3,7 @@ required_deps = { "requests": "requests", "openai": "openai", + "anthropic": "anthropic", "colorlog": "colorlog", "mirai": "yiri-mirai-rc", "aiocqhttp": "aiocqhttp", diff --git a/requirements.txt b/requirements.txt index 3c7007b9..6a3e718c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ requests openai>1.0.0 +anthropic colorlog~=6.6.0 yiri-mirai-rc aiocqhttp From 1dae7bd655b1e20211dd6893ba0b2fdb2f001c48 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sun, 17 Mar 2024 12:44:45 -0400 Subject: [PATCH 2/4] =?UTF-8?q?feat:=20=E5=AF=B9=20claude=20api=20?= =?UTF-8?q?=E7=9A=84=E5=9F=BA=E6=9C=AC=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/provider/modelmgr/apis/anthropicmsgs.py | 65 +++++++++++++++++++++ pkg/provider/modelmgr/apis/chatcmpl.py | 4 +- pkg/provider/modelmgr/modelmgr.py | 2 +- templates/metadata/llm-models.json | 15 +++++ templates/provider.json | 10 ++++ 5 files changed, 92 insertions(+), 4 deletions(-) create mode 100644 pkg/provider/modelmgr/apis/anthropicmsgs.py diff --git a/pkg/provider/modelmgr/apis/anthropicmsgs.py b/pkg/provider/modelmgr/apis/anthropicmsgs.py new file mode 100644 index 00000000..bfa088dd --- /dev/null +++ b/pkg/provider/modelmgr/apis/anthropicmsgs.py @@ -0,0 +1,65 @@ +from __future__ import annotations + 
+import typing + +import anthropic + +from .. import api, entities, errors + + +from ....core import entities as core_entities +from ... import entities as llm_entities +from ...tools import entities as tools_entities + + +@api.requester_class("anthropic-messages") +class AnthropicMessages(api.LLMAPIRequester): + """Anthropic Messages API 请求器""" + + client: anthropic.AsyncAnthropic + + async def initialize(self): + self.client = anthropic.AsyncAnthropic( + api_key="", + base_url=self.ap.provider_cfg.data['requester']['anthropic-messages']['base-url'], + timeout=self.ap.provider_cfg.data['requester']['anthropic-messages']['timeout'], + proxies=self.ap.proxy_mgr.get_forward_proxies() + ) + + async def request( + self, + query: core_entities.Query, + ) -> typing.AsyncGenerator[llm_entities.Message, None]: + self.client.api_key = query.use_model.token_mgr.get_token() + + args = self.ap.provider_cfg.data['requester']['anthropic-messages']['args'].copy() + args["model"] = query.use_model.name if query.use_model.model_name is None else query.use_model.model_name + + req_messages = [ # req_messages 仅用于类内,外部同步由 query.messages 进行 + m.dict(exclude_none=True) for m in query.prompt.messages + ] + [m.dict(exclude_none=True) for m in query.messages] + + # 删除所有 role=system & content='' 的消息 + req_messages = [ + m for m in req_messages if not (m["role"] == "system" and m["content"].strip() == "") + ] + + # 检查是否有 role=system 的消息,若有,改为 role=user,并在后面加一个 role=assistant 的消息 + system_role_index = [] + for i, m in enumerate(req_messages): + if m["role"] == "system": + system_role_index.append(i) + m["role"] = "user" + + if system_role_index: + for i in system_role_index[::-1]: + req_messages.insert(i + 1, {"role": "assistant", "content": "Okay, I'll follow."}) + + args["messages"] = req_messages + + resp = await self.client.messages.create(**args) + + yield llm_entities.Message( + content=resp.content[0].text, + role=resp.role + ) \ No newline at end of 
file diff --git a/pkg/provider/modelmgr/apis/chatcmpl.py b/pkg/provider/modelmgr/apis/chatcmpl.py index c8d1a812..0fe2788d 100644 --- a/pkg/provider/modelmgr/apis/chatcmpl.py +++ b/pkg/provider/modelmgr/apis/chatcmpl.py @@ -9,8 +9,6 @@ import openai.types.chat.chat_completion as chat_completion import httpx -from pkg.provider.entities import Message - from .. import api, entities, errors from ....core import entities as core_entities from ... import entities as llm_entities @@ -127,7 +125,7 @@ async def _request( req_messages.append(msg.dict(exclude_none=True)) - async def request(self, query: core_entities.Query) -> AsyncGenerator[Message, None]: + async def request(self, query: core_entities.Query) -> AsyncGenerator[llm_entities.Message, None]: try: async for msg in self._request(query): yield msg diff --git a/pkg/provider/modelmgr/modelmgr.py b/pkg/provider/modelmgr/modelmgr.py index 4a5b17e2..21a1c757 100644 --- a/pkg/provider/modelmgr/modelmgr.py +++ b/pkg/provider/modelmgr/modelmgr.py @@ -6,7 +6,7 @@ from ...core import app from . 
import token, api -from .apis import chatcmpl +from .apis import chatcmpl, anthropicmsgs FETCH_MODEL_LIST_URL = "https://api.qchatgpt.rockchin.top/api/v2/fetch/model_list" diff --git a/templates/metadata/llm-models.json b/templates/metadata/llm-models.json index 8775e665..061a7cc4 100644 --- a/templates/metadata/llm-models.json +++ b/templates/metadata/llm-models.json @@ -29,6 +29,21 @@ { "model_name": "gemini-pro", "name": "OneAPI/gemini-pro" + }, + { + "name": "claude-3-opus-20240229", + "requester": "anthropic-messages", + "token_mgr": "anthropic" + }, + { + "name": "claude-3-sonnet-20240229", + "requester": "anthropic-messages", + "token_mgr": "anthropic" + }, + { + "name": "claude-3-haiku-20240307", + "requester": "anthropic-messages", + "token_mgr": "anthropic" } ] } \ No newline at end of file diff --git a/templates/provider.json b/templates/provider.json index d1a68d77..1c26bd4c 100644 --- a/templates/provider.json +++ b/templates/provider.json @@ -3,6 +3,9 @@ "keys": { "openai": [ "sk-1234567890" + ], + "anthropic": [ + "sk-1234567890" ] }, "requester": { @@ -10,6 +13,13 @@ "base-url": "https://api.openai.com/v1", "args": {}, "timeout": 120 + }, + "anthropic-messages": { + "base-url": "https://api.anthropic.com/v1", + "args": { + "max_tokens": 1024 + }, + "timeout": 120 } }, "model": "gpt-3.5-turbo", From 327b2509f68aefae1615906425c1e50dc5b36119 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sun, 17 Mar 2024 23:06:40 -0400 Subject: [PATCH 3/4] =?UTF-8?q?perf:=20=E5=BF=BD=E7=95=A5=E7=94=A8?= =?UTF-8?q?=E6=88=B7=E7=A9=BA=E6=B6=88=E6=81=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/provider/modelmgr/apis/anthropicmsgs.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/provider/modelmgr/apis/anthropicmsgs.py b/pkg/provider/modelmgr/apis/anthropicmsgs.py index bfa088dd..6fd1a055 100644 --- a/pkg/provider/modelmgr/apis/anthropicmsgs.py +++ 
b/pkg/provider/modelmgr/apis/anthropicmsgs.py @@ -55,6 +55,11 @@ async def request( for i in system_role_index[::-1]: req_messages.insert(i + 1, {"role": "assistant", "content": "Okay, I'll follow."}) + # 忽略掉空消息,用户可能发送空消息,而上层未过滤 + req_messages = [ + m for m in req_messages if m["content"].strip() != "" + ] + args["messages"] = req_messages resp = await self.client.messages.create(**args) From a723c8ce378888920b84033534ea9c2bb3037681 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sun, 17 Mar 2024 23:22:26 -0400 Subject: [PATCH 4/4] =?UTF-8?q?perf:=20claude=20=E7=9A=84=E6=8E=A5?= =?UTF-8?q?=E5=8F=A3=E5=BC=82=E5=B8=B8=E5=A4=84=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/provider/modelmgr/apis/anthropicmsgs.py | 24 +++++++++++++++------ pkg/provider/modelmgr/errors.py | 2 +- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/pkg/provider/modelmgr/apis/anthropicmsgs.py b/pkg/provider/modelmgr/apis/anthropicmsgs.py index 6fd1a055..71423e0a 100644 --- a/pkg/provider/modelmgr/apis/anthropicmsgs.py +++ b/pkg/provider/modelmgr/apis/anthropicmsgs.py @@ -1,6 +1,7 @@ from __future__ import annotations import typing +import traceback import anthropic @@ -62,9 +63,20 @@ async def request( args["messages"] = req_messages - resp = await self.client.messages.create(**args) - - yield llm_entities.Message( - content=resp.content[0].text, - role=resp.role - ) \ No newline at end of file + try: + + resp = await self.client.messages.create(**args) + + yield llm_entities.Message( + content=resp.content[0].text, + role=resp.role + ) + except anthropic.AuthenticationError as e: + raise errors.RequesterError(f'api-key 无效: {e.message}') + except anthropic.BadRequestError as e: + raise errors.RequesterError(str(e.message)) + except anthropic.NotFoundError as e: + if 'model: ' in str(e): + raise errors.RequesterError(f'模型无效: {e.message}') + else: + raise errors.RequesterError(f'请求地址无效: {e.message}') 
\ No newline at end of file diff --git a/pkg/provider/modelmgr/errors.py b/pkg/provider/modelmgr/errors.py index 4feddeab..d466cf11 100644 --- a/pkg/provider/modelmgr/errors.py +++ b/pkg/provider/modelmgr/errors.py @@ -2,4 +2,4 @@ class RequesterError(Exception): """Base class for all Requester errors.""" def __init__(self, message: str): - super().__init__("模型请求失败: "+message) \ No newline at end of file + super().__init__("模型请求失败: "+message)