From a0291ed15537383c36b56f313b6fbc871bb9d6a4 Mon Sep 17 00:00:00 2001 From: Danny Avila Date: Sat, 31 Aug 2024 16:33:51 -0400 Subject: [PATCH] =?UTF-8?q?=F0=9F=9A=A7=20chore:=20merge=20latest=20dev=20?= =?UTF-8?q?build=20to=20main=20repo=20(#3844)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * agents - phase 1 (#30) * chore: copy assistant files * feat: frontend and data-provider * feat: backend get endpoint test * fix(MessageEndpointIcon): switched to AgentName and AgentAvatar * fix: small fixes * fix: agent endpoint config * fix: show Agent Builder * chore: install agentus * chore: initial scaffolding for agents * fix: updated Assistant logic to Agent Logic for some Agent components * WIP first pass, demo of agent package * WIP: initial backend infra for agents * fix: agent list error * wip: agents routing * chore: Refactor useSSE hook to handle different data events * wip: correctly emit events * chore: Update @librechat/agentus npm dependency to version 1.0.9 * remove comment * first pass: streaming agent text * chore: Remove @librechat/agentus root-level workspace npm dependency * feat: Agent Schema and Model * fix: content handling fixes * fix: content message save * WIP: new content data * fix: run step issue with tool calls * chore: Update @librechat/agentus npm dependency to version 1.1.5 * feat: update controller and agent routes * wip: initial backend tool and tool error handling support * wip: tool chunks * chore: Update @librechat/agentus npm dependency to version 1.1.7 * chore: update tool_call typing, add test conditions and logs * fix: create agent * fix: create agent * first pass: render completed content parts * fix: remove logging, fix step handler typing * chore: Update @librechat/agentus npm dependency to version 1.1.9 * refactor: cleanup maps on unmount * chore: Update BaseClient.js to safely count tokens for string, number, and boolean values * fix: support subsequent messages with tool_calls * 
chore: export order * fix: select agent * fix: tool call types and handling * chore: switch to anthropic for testing * fix: AgentSelect * refactor: experimental: OpenAIClient to use array for intermediateReply * fix(useSSE): revert old condition for streaming legacy client tokens * fix: lint * revert `agent_id` to `id` * chore: update localization keys for agent-related components * feat: zod schema handling for actions * refactor(actions): if no params, no zodSchema * chore: Update @librechat/agentus npm dependency to version 1.2.1 * feat: first pass, actions * refactor: empty schema for actions without params * feat: Update createRun function to accept additional options * fix: message payload formatting; feat: add more client options * fix: ToolCall component rendering when action has no args but has output * refactor(ToolCall): allow non-stringy args * WIP: first pass, correctly formatted tool_calls between providers * refactor: Remove duplicate import of 'roles' module * refactor: Exclude 'vite.config.ts' from TypeScript compilation * refactor: fix agent related types > - no need to use endpoint/model fields for identifying agent metadata > - add `provider` distinction for agent-configured 'endpoint' - no need for agent-endpoint map - reduce complexity of tools as functions into tools as string[] - fix types related to above changes - reduce unnecessary variables for queries/mutations and corresponding react-query keys * refactor: Add tools and tool_kwargs fields to agent schema * refactor: Remove unused code and update dependencies * refactor: Update updateAgentHandler to use req.body directly * refactor: Update AgentSelect component to use localized hooks * refactor: Update agent schema to include tools and provider fields * refactor(AgentPanel): add scrollbar gutter, add provider field to form, fix agent schema required values * refactor: Update AgentSwitcher component to use selectedAgentId instead of selectedAgent * refactor: Update AgentPanel component 
to include alternateName import and defaultAgentFormValues * refactor(SelectDropDown): allow setting value as option while still supporting legacy usage (string values only) * refactor: SelectDropdown changes - Only necessary when the available values are objects with label/value fields and the selected value is expected to be a string. * refactor: TypeError issues and handle provider as option * feat: Add placeholder for provider selection in AgentPanel component * refactor: Update agent schema to include author and provider fields * fix: show expected 'create agent' placeholder when creating agent * chore: fix localization strings, hide capabilities form for now * chore: typing * refactor: import order and use compact agents schema for now * chore: typing * refactor: Update AgentForm type to use AgentCapabilities * fix agent form agent selection issues * feat: responsive agent selection * fix: Handle cancelled fetch in useSelectAgent hook * fix: reset agent form on accordion close/open * feat: Add agent_id to default conversation for agents endpoint * feat: agents endpoint request handling * refactor: reset conversation model on agent select * refactor: add `additional_instructions` to conversation schema, organize other fields * chore: casing * chore: types * refactor(loadAgentTools): explicitly pass agent_id, do not pass `model` to loadAgentTools for now, load action sets by agent_id * WIP: initial draft of real agent client initialization * WIP: first pass, anthropic agent requests * feat: remember last selected agent * feat: openai and azure connected * fix: prioritize agent model for runs unless an explicit override model is passed from client * feat: Agent Actions * fix: save agent id to convo * feat: model panel (#29) * feat: model panel * bring back comments * fix: method still null * fix: AgentPanel FormContext * feat: add more parameters * fix: style issues; refactor: Agent Controller * fix: cherry-pick * fix: Update AgentAvatar component to use 
AssistantIcon instead of BrainCircuit * feat: OGDialog for delete agent; feat(assistant): update Agent types, introduced `model_parameters` * feat: icon and general `model_parameters` update * feat: use react-hook-form better * fix: agent builder form reset issue when switching panels * refactor: modularize agent builder form --------- Co-authored-by: Danny Avila * fix: AgentPanel and ModelPanel type issues and use `useFormContext` and `watch` instead of `methods` directly and `useWatch`. * fix: tool call issues due to invalid input (anthropic) of empty string * fix: handle empty text in Part component --------- Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com> * refactor: remove form ModelPanel and fixed nested ternary expressions in AgentConfig * fix: Model Parameters not saved correctly * refactor: remove console log * feat: avatar upload and get for Agents (#36) Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com> * chore: update to public package * fix: typing, optional chaining * fix: cursor not showing for content parts * chore: conditionally enable agents * ci: fix azure test * ci: fix frontend tests, fix eslint api * refactor: Remove unused errorContentPart variable * continue of the agent message PR (#40) * last fixes * fix: agentMap * pr merge test (#41) * fix: model icon not fetching correctly * remove console logs * feat: agent name * refactor: pass documentsMap as a prop to allow re-render of assistant form * refactor: pass documentsMap as a prop to allow re-render of assistant form * chore: Bump version to 0.7.419 * fix: TypeError: Cannot read properties of undefined (reading 'id') * refactor: update AgentSwitcher component to use ControlCombobox instead of Combobox --------- Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com> --- api/app/clients/BaseClient.js | 55 +- api/app/clients/OpenAIClient.js | 19 +- api/app/clients/llm/createLLM.js | 2 +- 
api/app/clients/prompts/formatMessages.js | 78 +- .../clients/tools/structured/TavilySearch.js | 78 + api/models/Action.js | 6 +- api/models/Agent.js | 84 + api/models/Message.js | 86 +- api/models/schema/action.js | 1 + api/models/schema/agent.js | 66 + api/package.json | 2 + api/server/controllers/ModelController.js | 5 +- api/server/controllers/agents/callbacks.js | 83 + api/server/controllers/agents/client.js | 462 + api/server/controllers/agents/demo.js | 44 + api/server/controllers/agents/errors.js | 153 + api/server/controllers/agents/llm.js | 106 + api/server/controllers/agents/request.js | 150 + api/server/controllers/agents/run.js | 59 + api/server/controllers/agents/v1.js | 208 + api/server/index.js | 1 + api/server/middleware/buildEndpointOption.js | 15 +- api/server/routes/agents/actions.js | 166 + api/server/routes/agents/chat.js | 35 + api/server/routes/agents/index.js | 21 + api/server/routes/agents/v1.js | 77 + api/server/routes/assistants/actions.js | 22 +- api/server/routes/index.js | 62 +- api/server/services/ActionService.js | 22 +- api/server/services/Config/EndpointService.js | 2 + .../services/Config/loadDefaultEConfig.js | 13 +- .../services/Config/loadDefaultModels.js | 1 + api/server/services/Endpoints/agents/build.js | 30 + api/server/services/Endpoints/agents/index.js | 7 + .../services/Endpoints/agents/initialize.js | 119 + .../Endpoints/anthropic/initializeClient.js | 15 +- .../services/Endpoints/anthropic/llm.js | 55 + .../Endpoints/openAI/initializeClient.js | 32 +- api/server/services/Endpoints/openAI/llm.js | 120 + api/server/services/Tokenizer.js | 64 + api/server/services/ToolService.js | 117 +- api/typedefs.js | 60 + api/utils/tokens.js | 1 + client/package.json | 2 +- client/src/Providers/AgentsContext.tsx | 27 + client/src/Providers/AgentsMapContext.tsx | 6 + client/src/Providers/index.ts | 3 + client/src/common/agents-types.ts | 27 + client/src/common/assistants-types.ts | 6 +- client/src/common/index.ts | 1 + 
client/src/common/types.ts | 34 + .../components/Chat/Input/CircleRender.tsx | 36 + .../components/Chat/Input/Files/FileRow.tsx | 3 + .../components/Chat/Menus/Endpoints/Icons.tsx | 22 +- .../Chat/Messages/Content/ContentParts.tsx | 2 +- .../components/Chat/Messages/Content/Part.tsx | 142 +- .../Chat/Messages/Content/ToolCall.tsx | 32 +- .../components/Chat/Messages/MessageIcon.tsx | 20 +- .../components/Chat/Messages/MessageParts.tsx | 24 +- .../src/components/Endpoints/ConvoIconURL.tsx | 7 +- .../Endpoints/MessageEndpointIcon.tsx | 32 + .../src/components/Endpoints/MinimalIcon.tsx | 2 + .../Endpoints/Settings/AgentSettings.tsx | 4 +- .../Endpoints/Settings/Assistants.tsx | 12 +- .../components/Endpoints/Settings/settings.ts | 1 + .../Prompts/Groups/CategorySelector.tsx | 6 +- client/src/components/Share/MessageIcon.tsx | 10 +- .../components/SidePanel/AgentSwitcher.tsx | 87 + .../SidePanel/Agents/ActionsAuth.tsx | 296 + .../SidePanel/Agents/ActionsInput.tsx | 285 + .../SidePanel/Agents/ActionsPanel.tsx | 198 + .../SidePanel/Agents/ActionsTable/Columns.tsx | 54 + .../SidePanel/Agents/ActionsTable/Table.tsx | 47 + .../SidePanel/Agents/ActionsTable/index.ts | 2 + .../SidePanel/Agents/AgentAvatar.tsx | 196 + .../SidePanel/Agents/AgentConfig.tsx | 366 + .../SidePanel/Agents/AgentPanel.tsx | 207 + .../SidePanel/Agents/AgentPanelSwitch.tsx | 59 + .../SidePanel/Agents/AgentSelect.tsx | 182 + .../components/SidePanel/Agents/AgentTool.tsx | 104 + .../SidePanel/Agents/CapabilitiesForm.tsx | 60 + .../src/components/SidePanel/Agents/Code.tsx | 66 + .../components/SidePanel/Agents/CodeFiles.tsx | 95 + .../SidePanel/Agents/ContextButton.tsx | 110 + .../SidePanel/Agents/ImageVision.tsx | 40 + .../components/SidePanel/Agents/Images.tsx | 135 + .../SidePanel/Agents/ModelPanel.tsx | 283 + .../components/SidePanel/Agents/Retrieval.tsx | 91 + .../{AssistantAction.tsx => Action.tsx} | 8 +- .../SidePanel/Builder/ActionsPanel.tsx | 18 +- .../SidePanel/Builder/AssistantPanel.tsx | 22 +- 
.../SidePanel/Builder/AssistantSelect.tsx | 33 +- .../SidePanel/Builder/PanelSwitch.tsx | 17 +- client/src/components/SidePanel/SidePanel.tsx | 5 +- client/src/components/SidePanel/Switcher.tsx | 10 +- .../src/components/Tools/ToolSelectDialog.tsx | 25 +- client/src/components/ui/ModelParameters.tsx | 185 + client/src/components/ui/SelectDropDown.tsx | 39 +- client/src/components/ui/index.ts | 1 + client/src/data-provider/mutations.ts | 244 +- client/src/data-provider/queries.ts | 112 +- client/src/hooks/Agents/index.ts | 2 + client/src/hooks/Agents/useAgentsMap.ts | 21 + client/src/hooks/Agents/useSelectAgent.ts | 90 + client/src/hooks/Chat/useChatFunctions.ts | 52 +- client/src/hooks/Input/useTextarea.ts | 6 +- .../src/hooks/Messages/useMessageHelpers.tsx | 17 +- client/src/hooks/Nav/useSideNavLinks.ts | 21 + client/src/hooks/SSE/index.ts | 1 + client/src/hooks/SSE/useEventHandlers.ts | 5 +- client/src/hooks/SSE/useSSE.ts | 10 +- client/src/hooks/SSE/useStepHandler.ts | 220 + client/src/hooks/index.ts | 1 + client/src/localization/languages/De.ts | 3 +- client/src/localization/languages/Eng.ts | 18 + client/src/routes/Root.tsx | 21 +- client/src/store/endpoints.ts | 1 + client/src/store/families.ts | 14 + client/src/utils/buildDefaultConvo.ts | 12 +- client/src/utils/forms.ts | 91 + client/src/utils/index.ts | 1 + client/src/utils/map.ts | 13 +- client/src/utils/messages.ts | 2 + package-lock.json | 12286 ++++++++-------- packages/data-provider/package.json | 2 +- packages/data-provider/specs/actions.spec.ts | 277 +- packages/data-provider/specs/openapiSpecs.ts | 127 + packages/data-provider/src/actions.ts | 133 +- packages/data-provider/src/api-endpoints.ts | 15 + packages/data-provider/src/config.ts | 57 + packages/data-provider/src/data-service.ts | 211 +- packages/data-provider/src/file-config.ts | 1 + packages/data-provider/src/index.ts | 2 + packages/data-provider/src/keys.ts | 7 + packages/data-provider/src/parsers.ts | 33 +- 
packages/data-provider/src/schemas.ts | 166 +- packages/data-provider/src/types.ts | 1 + packages/data-provider/src/types/agents.ts | 219 + .../data-provider/src/types/assistants.ts | 134 +- packages/data-provider/src/types/mutations.ts | 50 + packages/data-provider/src/types/runs.ts | 21 + 141 files changed, 15035 insertions(+), 6276 deletions(-) create mode 100644 api/app/clients/tools/structured/TavilySearch.js create mode 100644 api/models/Agent.js create mode 100644 api/models/schema/agent.js create mode 100644 api/server/controllers/agents/callbacks.js create mode 100644 api/server/controllers/agents/client.js create mode 100644 api/server/controllers/agents/demo.js create mode 100644 api/server/controllers/agents/errors.js create mode 100644 api/server/controllers/agents/llm.js create mode 100644 api/server/controllers/agents/request.js create mode 100644 api/server/controllers/agents/run.js create mode 100644 api/server/controllers/agents/v1.js create mode 100644 api/server/routes/agents/actions.js create mode 100644 api/server/routes/agents/chat.js create mode 100644 api/server/routes/agents/index.js create mode 100644 api/server/routes/agents/v1.js create mode 100644 api/server/services/Endpoints/agents/build.js create mode 100644 api/server/services/Endpoints/agents/index.js create mode 100644 api/server/services/Endpoints/agents/initialize.js create mode 100644 api/server/services/Endpoints/anthropic/llm.js create mode 100644 api/server/services/Endpoints/openAI/llm.js create mode 100644 api/server/services/Tokenizer.js create mode 100644 client/src/Providers/AgentsContext.tsx create mode 100644 client/src/Providers/AgentsMapContext.tsx create mode 100644 client/src/common/agents-types.ts create mode 100644 client/src/components/Chat/Input/CircleRender.tsx create mode 100644 client/src/components/SidePanel/AgentSwitcher.tsx create mode 100644 client/src/components/SidePanel/Agents/ActionsAuth.tsx create mode 100644 
client/src/components/SidePanel/Agents/ActionsInput.tsx create mode 100644 client/src/components/SidePanel/Agents/ActionsPanel.tsx create mode 100644 client/src/components/SidePanel/Agents/ActionsTable/Columns.tsx create mode 100644 client/src/components/SidePanel/Agents/ActionsTable/Table.tsx create mode 100644 client/src/components/SidePanel/Agents/ActionsTable/index.ts create mode 100644 client/src/components/SidePanel/Agents/AgentAvatar.tsx create mode 100644 client/src/components/SidePanel/Agents/AgentConfig.tsx create mode 100644 client/src/components/SidePanel/Agents/AgentPanel.tsx create mode 100644 client/src/components/SidePanel/Agents/AgentPanelSwitch.tsx create mode 100644 client/src/components/SidePanel/Agents/AgentSelect.tsx create mode 100644 client/src/components/SidePanel/Agents/AgentTool.tsx create mode 100644 client/src/components/SidePanel/Agents/CapabilitiesForm.tsx create mode 100644 client/src/components/SidePanel/Agents/Code.tsx create mode 100644 client/src/components/SidePanel/Agents/CodeFiles.tsx create mode 100644 client/src/components/SidePanel/Agents/ContextButton.tsx create mode 100644 client/src/components/SidePanel/Agents/ImageVision.tsx create mode 100644 client/src/components/SidePanel/Agents/Images.tsx create mode 100644 client/src/components/SidePanel/Agents/ModelPanel.tsx create mode 100644 client/src/components/SidePanel/Agents/Retrieval.tsx rename client/src/components/SidePanel/Builder/{AssistantAction.tsx => Action.tsx} (89%) create mode 100644 client/src/components/ui/ModelParameters.tsx create mode 100644 client/src/hooks/Agents/index.ts create mode 100644 client/src/hooks/Agents/useAgentsMap.ts create mode 100644 client/src/hooks/Agents/useSelectAgent.ts create mode 100644 client/src/hooks/SSE/useStepHandler.ts create mode 100644 client/src/utils/forms.ts create mode 100644 packages/data-provider/src/types/agents.ts create mode 100644 packages/data-provider/src/types/runs.ts diff --git a/api/app/clients/BaseClient.js 
b/api/app/clients/BaseClient.js index 0d769155b1b..76403880608 100644 --- a/api/app/clients/BaseClient.js +++ b/api/app/clients/BaseClient.js @@ -34,6 +34,12 @@ class BaseClient { this.userMessagePromise; /** @type {ClientDatabaseSavePromise} */ this.responsePromise; + /** @type {string} */ + this.user; + /** @type {string} */ + this.conversationId; + /** @type {string} */ + this.responseMessageId; } setOptions() { @@ -161,6 +167,8 @@ class BaseClient { this.currentMessages[this.currentMessages.length - 1].messageId = head; } + this.responseMessageId = responseMessageId; + return { ...opts, user, @@ -347,7 +355,12 @@ class BaseClient { }; } - async handleContextStrategy({ instructions, orderedMessages, formattedMessages }) { + async handleContextStrategy({ + instructions, + orderedMessages, + formattedMessages, + buildTokenMap = true, + }) { let _instructions; let tokenCount; @@ -417,19 +430,23 @@ class BaseClient { maxContextTokens: this.maxContextTokens, }); - let tokenCountMap = orderedWithInstructions.reduce((map, message, index) => { - const { messageId } = message; - if (!messageId) { - return map; - } + /** @type {Record | undefined} */ + let tokenCountMap; + if (buildTokenMap) { + tokenCountMap = orderedWithInstructions.reduce((map, message, index) => { + const { messageId } = message; + if (!messageId) { + return map; + } - if (shouldSummarize && index === summaryIndex && !usePrevSummary) { - map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount }; - } + if (shouldSummarize && index === summaryIndex && !usePrevSummary) { + map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount }; + } - map[messageId] = orderedWithInstructions[index].tokenCount; - return map; - }, {}); + map[messageId] = orderedWithInstructions[index].tokenCount; + return map; + }, {}); + } const promptTokens = this.maxContextTokens - remainingContextTokens; @@ -542,13 +559,19 @@ class BaseClient { isEdited, model: 
this.modelOptions.model, sender: this.sender, - text: addSpaceIfNeeded(generation) + completion, promptTokens, iconURL: this.options.iconURL, endpoint: this.options.endpoint, ...(this.metadata ?? {}), }; + if (typeof completion === 'string') { + responseMessage.text = addSpaceIfNeeded(generation) + completion; + } else if (completion) { + responseMessage.text = ''; + responseMessage.content = completion; + } + if ( tokenCountMap && this.recordTokenUsage && @@ -868,8 +891,12 @@ class BaseClient { processValue(nestedValue); } - } else { + } else if (typeof value === 'string') { numTokens += this.getTokenCount(value); + } else if (typeof value === 'number') { + numTokens += this.getTokenCount(value.toString()); + } else if (typeof value === 'boolean') { + numTokens += this.getTokenCount(value.toString()); } }; diff --git a/api/app/clients/OpenAIClient.js b/api/app/clients/OpenAIClient.js index 0b13488537e..4338a29d5a4 100644 --- a/api/app/clients/OpenAIClient.js +++ b/api/app/clients/OpenAIClient.js @@ -1023,7 +1023,7 @@ ${convo} async chatCompletion({ payload, onProgress, abortController = null }) { let error = null; const errorCallback = (err) => (error = err); - let intermediateReply = ''; + const intermediateReply = []; try { if (!abortController) { abortController = new AbortController(); @@ -1217,19 +1217,19 @@ ${convo} } if (typeof finalMessage.content !== 'string' || finalMessage.content.trim() === '') { - finalChatCompletion.choices[0].message.content = intermediateReply; + finalChatCompletion.choices[0].message.content = intermediateReply.join(''); } }) .on('finalMessage', (message) => { if (message?.role !== 'assistant') { - stream.messages.push({ role: 'assistant', content: intermediateReply }); + stream.messages.push({ role: 'assistant', content: intermediateReply.join('') }); UnexpectedRoleError = true; } }); for await (const chunk of stream) { const token = chunk.choices[0]?.delta?.content || ''; - intermediateReply += token; + 
intermediateReply.push(token); onProgress(token); if (abortController.signal.aborted) { stream.controller.abort(); @@ -1285,11 +1285,12 @@ ${convo} } if (typeof message.content !== 'string' || message.content.trim() === '') { + const reply = intermediateReply.join(''); logger.debug( '[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content', - { intermediateReply }, + { intermediateReply: reply }, ); - return intermediateReply; + return reply; } return message.content; @@ -1298,7 +1299,7 @@ ${convo} err?.message?.includes('abort') || (err instanceof OpenAI.APIError && err?.message?.includes('abort')) ) { - return intermediateReply; + return intermediateReply.join(''); } if ( err?.message?.includes( @@ -1313,10 +1314,10 @@ ${convo} (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) ) { logger.error('[OpenAIClient] Known OpenAI error:', err); - return intermediateReply; + return intermediateReply.join(''); } else if (err instanceof OpenAI.APIError) { if (intermediateReply) { - return intermediateReply; + return intermediateReply.join(''); } else { throw err; } diff --git a/api/app/clients/llm/createLLM.js b/api/app/clients/llm/createLLM.js index 09b29cca8e9..3344ced4ba3 100644 --- a/api/app/clients/llm/createLLM.js +++ b/api/app/clients/llm/createLLM.js @@ -8,7 +8,7 @@ const { isEnabled } = require('~/server/utils'); * @param {Object} options - The options for creating the LLM. * @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings. * @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers. - * @param {Callbacks} options.callbacks - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count. 
+ * @param {Callbacks} [options.callbacks] - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count. * @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode. * @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication. * @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations. diff --git a/api/app/clients/prompts/formatMessages.js b/api/app/clients/prompts/formatMessages.js index c19eee260af..87d5ba7a15f 100644 --- a/api/app/clients/prompts/formatMessages.js +++ b/api/app/clients/prompts/formatMessages.js @@ -1,4 +1,5 @@ -const { EModelEndpoint } = require('librechat-data-provider'); +const { ToolMessage } = require('@langchain/core/messages'); +const { EModelEndpoint, ContentTypes } = require('librechat-data-provider'); const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema'); /** @@ -14,11 +15,11 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema'); */ const formatVisionMessage = ({ message, image_urls, endpoint }) => { if (endpoint === EModelEndpoint.anthropic) { - message.content = [...image_urls, { type: 'text', text: message.content }]; + message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }]; return message; } - message.content = [{ type: 'text', text: message.content }, ...image_urls]; + message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls]; return message; }; @@ -51,7 +52,7 @@ const formatMessage = ({ message, userName, assistantName, endpoint, langChain = _role = roleMapping[lc_id[2]]; } const role = _role ?? (sender && sender?.toLowerCase() === 'user' ? 'user' : 'assistant'); - const content = text ?? _content ?? ''; + const content = _content ?? text ?? 
''; const formattedMessage = { role, content, @@ -131,4 +132,71 @@ const formatFromLangChain = (message) => { }; }; -module.exports = { formatMessage, formatLangChainMessages, formatFromLangChain }; +/** + * Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances. + * + * @param {Array>} payload - The array of messages to format. + * @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls. + */ +const formatAgentMessages = (payload) => { + const messages = []; + + for (const message of payload) { + if (message.role !== 'assistant') { + messages.push(formatMessage({ message, langChain: true })); + continue; + } + + let currentContent = []; + let lastAIMessage = null; + + for (const part of message.content) { + if (part.type === ContentTypes.TEXT && part.tool_call_ids) { + // If there's pending content, add it as an AIMessage + if (currentContent.length > 0) { + messages.push(new AIMessage({ content: currentContent })); + currentContent = []; + } + + // Create a new AIMessage with this text and prepare for tool calls + lastAIMessage = new AIMessage({ + content: part.text || '', + }); + + messages.push(lastAIMessage); + } else if (part.type === ContentTypes.TOOL_CALL) { + if (!lastAIMessage) { + throw new Error('Invalid tool call structure: No preceding AIMessage with tool_call_ids'); + } + + // Note: `tool_calls` list is defined when constructed by `AIMessage` class, and outputs should be excluded from it + const { output, ...tool_call } = part.tool_call; + lastAIMessage.tool_calls.push(tool_call); + + // Add the corresponding ToolMessage + messages.push( + new ToolMessage({ + tool_call_id: tool_call.id, + name: tool_call.name, + content: output, + }), + ); + } else { + currentContent.push(part); + } + } + + if (currentContent.length > 0) { + messages.push(new AIMessage({ content: currentContent })); + } + } + + return messages; 
+}; + +module.exports = { + formatMessage, + formatFromLangChain, + formatAgentMessages, + formatLangChainMessages, +}; diff --git a/api/app/clients/tools/structured/TavilySearch.js b/api/app/clients/tools/structured/TavilySearch.js new file mode 100644 index 00000000000..3bb8f34f360 --- /dev/null +++ b/api/app/clients/tools/structured/TavilySearch.js @@ -0,0 +1,78 @@ +const { z } = require('zod'); +const { tool } = require('@langchain/core/tools'); +const { getEnvironmentVariable } = require('@langchain/core/utils/env'); + +function createTavilySearchTool(fields = {}) { + const envVar = 'TAVILY_API_KEY'; + const override = fields.override ?? false; + const apiKey = fields.apiKey ?? getApiKey(envVar, override); + const kwargs = fields?.kwargs ?? {}; + + function getApiKey(envVar, override) { + const key = getEnvironmentVariable(envVar); + if (!key && !override) { + throw new Error(`Missing ${envVar} environment variable.`); + } + return key; + } + + return tool( + async (input) => { + const { query, ...rest } = input; + + const requestBody = { + api_key: apiKey, + query, + ...rest, + ...kwargs, + }; + + const response = await fetch('https://api.tavily.com/search', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(requestBody), + }); + + const json = await response.json(); + if (!response.ok) { + throw new Error(`Request failed with status ${response.status}: ${json.error}`); + } + + return JSON.stringify(json); + }, + { + name: 'tavily_search_results_json', + description: + 'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.', + schema: z.object({ + query: z.string().min(1).describe('The search query string.'), + max_results: z + .number() + .min(1) + .max(10) + .optional() + .describe('The maximum number of search results to return. 
Defaults to 5.'), + search_depth: z + .enum(['basic', 'advanced']) + .optional() + .describe( + 'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Default is basic for quick results and advanced for indepth high quality results but longer response time. Advanced calls equals 2 requests.', + ), + include_images: z + .boolean() + .optional() + .describe( + 'Whether to include a list of query-related images in the response. Default is False.', + ), + include_answer: z + .boolean() + .optional() + .describe('Whether to include answers in the search results. Default is False.'), + }), + }, + ); +} + +module.exports = createTavilySearchTool; diff --git a/api/models/Action.js b/api/models/Action.js index 86bd5d85948..7971f3e61a3 100644 --- a/api/models/Action.js +++ b/api/models/Action.js @@ -12,7 +12,7 @@ const Action = mongoose.model('action', actionSchema); * @param {string} searchParams.user - The user ID of the action's author. * @param {Object} updateData - An object containing the properties to update. * @param {mongoose.ClientSession} [session] - The transaction session to use. - * @returns {Promise} The updated or newly created action document as a plain object. + * @returns {Promise} The updated or newly created action document as a plain object. */ const updateAction = async (searchParams, updateData, session = null) => { const options = { new: true, upsert: true, session }; @@ -24,7 +24,7 @@ const updateAction = async (searchParams, updateData, session = null) => { * * @param {Object} searchParams - The search parameters to find matching actions. * @param {boolean} includeSensitive - Flag to include sensitive data in the metadata. - * @returns {Promise>} A promise that resolves to an array of action documents as plain objects. + * @returns {Promise>} A promise that resolves to an array of action documents as plain objects. 
*/ const getActions = async (searchParams, includeSensitive = false) => { const actions = await Action.find(searchParams).lean(); @@ -55,7 +55,7 @@ const getActions = async (searchParams, includeSensitive = false) => { * @param {string} searchParams.action_id - The ID of the action to delete. * @param {string} searchParams.user - The user ID of the action's author. * @param {mongoose.ClientSession} [session] - The transaction session to use (optional). - * @returns {Promise} A promise that resolves to the deleted action document as a plain object, or null if no document was found. + * @returns {Promise} A promise that resolves to the deleted action document as a plain object, or null if no document was found. */ const deleteAction = async (searchParams, session = null) => { const options = session ? { session } : {}; diff --git a/api/models/Agent.js b/api/models/Agent.js new file mode 100644 index 00000000000..1ee783b101e --- /dev/null +++ b/api/models/Agent.js @@ -0,0 +1,84 @@ +const mongoose = require('mongoose'); +const agentSchema = require('./schema/agent'); + +const Agent = mongoose.model('agent', agentSchema); + +/** + * Create an agent with the provided data. + * @param {Object} agentData - The agent data to create. + * @returns {Promise} The created agent document as a plain object. + * @throws {Error} If the agent creation fails. + */ +const createAgent = async (agentData) => { + return await Agent.create(agentData); +}; + +/** + * Get an agent document based on the provided ID. + * + * @param {Object} searchParameter - The search parameters to find the agent to update. + * @param {string} searchParameter.id - The ID of the agent to update. + * @param {string} searchParameter.author - The user ID of the agent's author. + * @returns {Promise} The agent document as a plain object, or null if not found. 
+ */ +const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean(); + +/** + * Update an agent with new data without overwriting existing properties, + * or create a new agent if it doesn't exist, within a transaction session if provided. + * + * @param {Object} searchParameter - The search parameters to find the agent to update. + * @param {string} searchParameter.id - The ID of the agent to update. + * @param {string} searchParameter.author - The user ID of the agent's author. + * @param {Object} updateData - An object containing the properties to update. + * @param {mongoose.ClientSession} [session] - The transaction session to use (optional). + * @returns {Promise} The updated or newly created agent document as a plain object. + */ +const updateAgent = async (searchParameter, updateData, session = null) => { + const options = { new: true, upsert: true, session }; + return await Agent.findOneAndUpdate(searchParameter, updateData, options).lean(); +}; + +/** + * Deletes an agent based on the provided ID. + * + * @param {Object} searchParameter - The search parameters to find the agent to delete. + * @param {string} searchParameter.id - The ID of the agent to delete. + * @param {string} searchParameter.author - The user ID of the agent's author. + * @returns {Promise} Resolves when the agent has been successfully deleted. + */ +const deleteAgent = async (searchParameter) => { + return await Agent.findOneAndDelete(searchParameter); +}; + +/** + * Get all agents. + * @param {Object} searchParameter - The search parameters to find matching agents. + * @param {string} searchParameter.author - The user ID of the agent's author. + * @returns {Promise} A promise that resolves to an object containing the agents data and pagination info. 
+ */ +const getListAgents = async (searchParameter) => { + const agents = await Agent.find(searchParameter, { + id: 1, + name: 1, + avatar: 1, + }).lean(); + const hasMore = agents.length > 0; + const firstId = agents.length > 0 ? agents[0].id : null; + const lastId = agents.length > 0 ? agents[agents.length - 1].id : null; + + return { + data: agents, + has_more: hasMore, + first_id: firstId, + last_id: lastId, + }; +}; + +module.exports = { + createAgent, + getAgent, + updateAgent, + deleteAgent, + getListAgents, +}; diff --git a/api/models/Message.js b/api/models/Message.js index 848570c4e3e..ccff9cb485c 100644 --- a/api/models/Message.js +++ b/api/models/Message.js @@ -35,82 +35,34 @@ const idSchema = z.string().uuid(); * @throws {Error} If there is an error in saving the message. */ async function saveMessage(req, params, metadata) { - try { - if (!req || !req.user || !req.user.id) { - throw new Error('User not authenticated'); - } - - const { - text, - error, - model, - files, - plugin, - sender, - plugins, - iconURL, - endpoint, - isEdited, - messageId, - unfinished, - tokenCount, - newMessageId, - finish_reason, - conversationId, - parentMessageId, - isCreatedByUser, - } = params; - - const validConvoId = idSchema.safeParse(conversationId); - if (!validConvoId.success) { - logger.warn(`Invalid conversation ID: ${conversationId}`); - if (metadata && metadata?.context) { - logger.info(`---\`saveMessage\` context: ${metadata.context}`); - } - - logger.info(`---Invalid conversation ID Params: - -${JSON.stringify(params, null, 2)} + if (!req?.user?.id) { + throw new Error('User not authenticated'); + } -`); - return; - } + const validConvoId = idSchema.safeParse(params.conversationId); + if (!validConvoId.success) { + logger.warn(`Invalid conversation ID: ${params.conversationId}`); + logger.info(`---\`saveMessage\` context: ${metadata?.context}`); + logger.info(`---Invalid conversation ID Params: ${JSON.stringify(params, null, 2)}`); + return; + } + try { const 
update = { + ...params, user: req.user.id, - iconURL, - endpoint, - messageId: newMessageId || messageId, - conversationId, - parentMessageId, - sender, - text, - isCreatedByUser, - isEdited, - finish_reason, - error, - unfinished, - tokenCount, - plugin, - plugins, - model, + messageId: params.newMessageId || params.messageId, }; - - if (files) { - update.files = files; - } - - const message = await Message.findOneAndUpdate({ messageId, user: req.user.id }, update, { - upsert: true, - new: true, - }); + const message = await Message.findOneAndUpdate( + { messageId: params.messageId, user: req.user.id }, + update, + { upsert: true, new: true }, + ); return message.toObject(); } catch (err) { logger.error('Error saving message:', err); - if (metadata && metadata?.context) { - logger.info(`---\`saveMessage\` context: ${metadata.context}`); - } + logger.info(`---\`saveMessage\` context: ${metadata?.context}`); throw err; } } diff --git a/api/models/schema/action.js b/api/models/schema/action.js index 9e9109adf78..f86a9bfa2d0 100644 --- a/api/models/schema/action.js +++ b/api/models/schema/action.js @@ -39,6 +39,7 @@ const actionSchema = new Schema({ default: 'action_prototype', }, settings: Schema.Types.Mixed, + agent_id: String, assistant_id: String, metadata: { api_key: String, // private, encrypted diff --git a/api/models/schema/agent.js b/api/models/schema/agent.js new file mode 100644 index 00000000000..97f0527916c --- /dev/null +++ b/api/models/schema/agent.js @@ -0,0 +1,66 @@ +const mongoose = require('mongoose'); + +const agentSchema = mongoose.Schema( + { + id: { + type: String, + index: true, + required: true, + }, + name: { + type: String, + }, + description: { + type: String, + }, + instructions: { + type: String, + }, + avatar: { + type: { + filepath: String, + source: String, + }, + default: undefined, + }, + provider: { + type: String, + required: true, + }, + model: { + type: String, + required: true, + }, + model_parameters: { + type: Object, + }, + 
access_level: { + type: Number, + }, + tools: { + type: [String], + default: undefined, + }, + tool_kwargs: { + type: [{ type: mongoose.Schema.Types.Mixed }], + }, + file_ids: { + type: [String], + default: undefined, + }, + actions: { + type: [String], + default: undefined, + }, + author: { + type: mongoose.Schema.Types.ObjectId, + ref: 'User', + required: true, + }, + }, + { + timestamps: true, + }, +); + +module.exports = agentSchema; diff --git a/api/package.json b/api/package.json index 43ccda33e41..43d8609a8e5 100644 --- a/api/package.json +++ b/api/package.json @@ -40,8 +40,10 @@ "@keyv/mongo": "^2.1.8", "@keyv/redis": "^2.8.1", "@langchain/community": "^0.0.46", + "@langchain/core": "^0.2.18", "@langchain/google-genai": "^0.0.11", "@langchain/google-vertexai": "^0.0.17", + "@librechat/agents": "^1.4.1", "axios": "^1.3.4", "bcryptjs": "^2.4.3", "cheerio": "^1.0.0-rc.12", diff --git a/api/server/controllers/ModelController.js b/api/server/controllers/ModelController.js index 022ece4c103..79dc81d6b02 100644 --- a/api/server/controllers/ModelController.js +++ b/api/server/controllers/ModelController.js @@ -2,6 +2,9 @@ const { CacheKeys } = require('librechat-data-provider'); const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config'); const { getLogStores } = require('~/cache'); +/** + * @param {ServerRequest} req + */ const getModelsConfig = async (req) => { const cache = getLogStores(CacheKeys.CONFIG_STORE); let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG); @@ -14,7 +17,7 @@ const getModelsConfig = async (req) => { /** * Loads the models from the config. - * @param {Express.Request} req - The Express request object. + * @param {ServerRequest} req - The Express request object. * @returns {Promise} The models config. 
*/ async function loadModels(req) { diff --git a/api/server/controllers/agents/callbacks.js b/api/server/controllers/agents/callbacks.js new file mode 100644 index 00000000000..9649f56a53b --- /dev/null +++ b/api/server/controllers/agents/callbacks.js @@ -0,0 +1,83 @@ +const { GraphEvents, ToolEndHandler, ChatModelStreamHandler } = require('@librechat/agents'); + +/** @typedef {import('@librechat/agents').EventHandler} EventHandler */ +/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */ +/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */ + +/** + * Sends message data in Server Sent Events format. + * @param {ServerResponse} res - The server response. + * @param {{ data: string | Record, event?: string }} event - The message event. + * @param {string} event.event - The type of event. + * @param {string} event.data - The message to be sent. + */ +const sendEvent = (res, event) => { + if (typeof event.data === 'string' && event.data.length === 0) { + return; + } + res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`); +}; + +/** + * Get default handlers for stream events. + * @param {{ res?: ServerResponse }} options - The options object. + * @returns {Record} The default handlers. + * @throws {Error} If the request is not found. + */ +function getDefaultHandlers({ res }) { + if (!res) { + throw new Error('Request not found'); + } + const handlers = { + // [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(), + [GraphEvents.TOOL_END]: new ToolEndHandler(), + [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(), + [GraphEvents.ON_RUN_STEP]: { + /** + * Handle ON_RUN_STEP event. + * @param {string} event - The event name. + * @param {StreamEventData} data - The event data. + */ + handle: (event, data) => { + sendEvent(res, { event, data }); + }, + }, + [GraphEvents.ON_RUN_STEP_DELTA]: { + /** + * Handle ON_RUN_STEP_DELTA event. + * @param {string} event - The event name. 
+ * @param {StreamEventData} data - The event data. + */ + handle: (event, data) => { + sendEvent(res, { event, data }); + }, + }, + [GraphEvents.ON_RUN_STEP_COMPLETED]: { + /** + * Handle ON_RUN_STEP_COMPLETED event. + * @param {string} event - The event name. + * @param {StreamEventData & { result: ToolEndData }} data - The event data. + */ + handle: (event, data) => { + sendEvent(res, { event, data }); + }, + }, + [GraphEvents.ON_MESSAGE_DELTA]: { + /** + * Handle ON_MESSAGE_DELTA event. + * @param {string} event - The event name. + * @param {StreamEventData} data - The event data. + */ + handle: (event, data) => { + sendEvent(res, { event, data }); + }, + }, + }; + + return handlers; +} + +module.exports = { + sendEvent, + getDefaultHandlers, +}; diff --git a/api/server/controllers/agents/client.js b/api/server/controllers/agents/client.js new file mode 100644 index 00000000000..82e6a6f48a8 --- /dev/null +++ b/api/server/controllers/agents/client.js @@ -0,0 +1,462 @@ +// const { HttpsProxyAgent } = require('https-proxy-agent'); +// const { +// Constants, +// ImageDetail, +// EModelEndpoint, +// resolveHeaders, +// validateVisionModel, +// mapModelToAzureConfig, +// } = require('librechat-data-provider'); +const { Callback } = require('@librechat/agents'); +const { + EModelEndpoint, + providerEndpointMap, + removeNullishValues, +} = require('librechat-data-provider'); +const { + extractBaseURL, + // constructAzureURL, + // genAzureChatCompletion, +} = require('~/utils'); +const { + formatMessage, + formatAgentMessages, + createContextHandlers, +} = require('~/app/clients/prompts'); +const Tokenizer = require('~/server/services/Tokenizer'); +const BaseClient = require('~/app/clients/BaseClient'); +// const { sleep } = require('~/server/utils'); +const { createRun } = require('./run'); +const { logger } = require('~/config'); + +class AgentClient extends BaseClient { + constructor(options = {}) { + super(options); + + /** @type {'discard' | 'summarize'} */ + 
this.contextStrategy = 'discard'; + + /** @deprecated @type {true} - Is a Chat Completion Request */ + this.isChatCompletion = true; + + const { maxContextTokens, modelOptions = {}, ...clientOptions } = options; + + this.modelOptions = modelOptions; + this.maxContextTokens = maxContextTokens; + this.options = Object.assign({ endpoint: EModelEndpoint.agents }, clientOptions); + } + + setOptions(options) { + logger.info('[api/server/controllers/agents/client.js] setOptions', options); + } + + /** + * + * Checks if the model is a vision model based on request attachments and sets the appropriate options: + * - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request. + * - Sets `this.isVisionModel` to `true` if vision request. + * - Deletes `this.modelOptions.stop` if vision request. + * @param {MongoFile[]} attachments + */ + checkVisionRequest(attachments) { + logger.info( + '[api/server/controllers/agents/client.js #checkVisionRequest] not implemented', + attachments, + ); + // if (!attachments) { + // return; + // } + + // const availableModels = this.options.modelsConfig?.[this.options.endpoint]; + // if (!availableModels) { + // return; + // } + + // let visionRequestDetected = false; + // for (const file of attachments) { + // if (file?.type?.includes('image')) { + // visionRequestDetected = true; + // break; + // } + // } + // if (!visionRequestDetected) { + // return; + // } + + // this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels }); + // if (this.isVisionModel) { + // delete this.modelOptions.stop; + // return; + // } + + // for (const model of availableModels) { + // if (!validateVisionModel({ model, availableModels })) { + // continue; + // } + // this.modelOptions.model = model; + // this.isVisionModel = true; + // delete this.modelOptions.stop; + // return; + // } + + // if (!availableModels.includes(this.defaultVisionModel)) { + // return; + // } + // if (!validateVisionModel({ 
model: this.defaultVisionModel, availableModels })) { + // return; + // } + + // this.modelOptions.model = this.defaultVisionModel; + // this.isVisionModel = true; + // delete this.modelOptions.stop; + } + + getSaveOptions() { + return removeNullishValues( + Object.assign( + { + agent_id: this.options.agent.id, + modelLabel: this.options.modelLabel, + maxContextTokens: this.options.maxContextTokens, + resendFiles: this.options.resendFiles, + imageDetail: this.options.imageDetail, + spec: this.options.spec, + }, + this.modelOptions, + { + model: undefined, + // TODO: + // would need to be override settings; otherwise, model needs to be undefined + // model: this.override.model, + // instructions: this.override.instructions, + // additional_instructions: this.override.additional_instructions, + }, + ), + ); + } + + getBuildMessagesOptions(opts) { + return { + instructions: opts.instructions, + additional_instructions: opts.additional_instructions, + }; + } + + async buildMessages( + messages, + parentMessageId, + { instructions = null, additional_instructions = null }, + opts, + ) { + let orderedMessages = this.constructor.getMessagesForConversation({ + messages, + parentMessageId, + summary: this.shouldSummarize, + }); + + let payload; + /** @type {{ role: string; name: string; content: string } | undefined} */ + let systemMessage; + /** @type {number | undefined} */ + let promptTokens; + + /** @type {string} */ + let systemContent = `${instructions ?? ''}${additional_instructions ?? 
''}`; + + if (this.options.attachments) { + const attachments = await this.options.attachments; + + if (this.message_file_map) { + this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments; + } else { + this.message_file_map = { + [orderedMessages[orderedMessages.length - 1].messageId]: attachments, + }; + } + + const files = await this.addImageURLs( + orderedMessages[orderedMessages.length - 1], + attachments, + ); + + this.options.attachments = files; + } + + if (this.message_file_map) { + this.contextHandlers = createContextHandlers( + this.options.req, + orderedMessages[orderedMessages.length - 1].text, + ); + } + + const formattedMessages = orderedMessages.map((message, i) => { + const formattedMessage = formatMessage({ + message, + userName: this.options?.name, + assistantName: this.options?.modelLabel, + }); + + const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount; + + /* If tokens were never counted, or, is a Vision request and the message has files, count again */ + if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) { + orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage); + } + + /* If message has files, calculate image token cost */ + // if (this.message_file_map && this.message_file_map[message.messageId]) { + // const attachments = this.message_file_map[message.messageId]; + // for (const file of attachments) { + // if (file.embedded) { + // this.contextHandlers?.processFile(file); + // continue; + // } + + // orderedMessages[i].tokenCount += this.calculateImageTokenCost({ + // width: file.width, + // height: file.height, + // detail: this.options.imageDetail ?? 
ImageDetail.auto, + // }); + // } + // } + + return formattedMessage; + }); + + if (this.contextHandlers) { + this.augmentedPrompt = await this.contextHandlers.createContext(); + systemContent = this.augmentedPrompt + systemContent; + } + + if (systemContent) { + systemContent = `${systemContent.trim()}`; + systemMessage = { + role: 'system', + name: 'instructions', + content: systemContent, + }; + + if (this.contextStrategy) { + const instructionTokens = this.getTokenCountForMessage(systemMessage); + if (instructionTokens >= 0) { + const firstMessageTokens = orderedMessages[0].tokenCount ?? 0; + orderedMessages[0].tokenCount = firstMessageTokens + instructionTokens; + } + } + } + + if (this.contextStrategy) { + ({ payload, promptTokens, messages } = await this.handleContextStrategy({ + orderedMessages, + formattedMessages, + /* prefer usage_metadata from final message */ + buildTokenMap: false, + })); + } + + const result = { + prompt: payload, + promptTokens, + messages, + }; + + if (promptTokens >= 0 && typeof opts?.getReqData === 'function') { + opts.getReqData({ promptTokens }); + } + + return result; + } + + /** @type {sendCompletion} */ + async sendCompletion(payload, opts = {}) { + this.modelOptions.user = this.user; + return await this.chatCompletion({ + payload, + onProgress: opts.onProgress, + abortController: opts.abortController, + }); + } + + // async recordTokenUsage({ promptTokens, completionTokens, context = 'message' }) { + // await spendTokens( + // { + // context, + // model: this.modelOptions.model, + // conversationId: this.conversationId, + // user: this.user ?? 
this.options.req.user?.id, + // endpointTokenConfig: this.options.endpointTokenConfig, + // }, + // { promptTokens, completionTokens }, + // ); + // } + + async chatCompletion({ payload, abortController = null }) { + try { + if (!abortController) { + abortController = new AbortController(); + } + + const baseURL = extractBaseURL(this.completionsUrl); + logger.debug('[api/server/controllers/agents/client.js] chatCompletion', { + baseURL, + payload, + }); + + // if (this.useOpenRouter) { + // opts.defaultHeaders = { + // 'HTTP-Referer': 'https://librechat.ai', + // 'X-Title': 'LibreChat', + // }; + // } + + // if (this.options.headers) { + // opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers }; + // } + + // if (this.options.proxy) { + // opts.httpAgent = new HttpsProxyAgent(this.options.proxy); + // } + + // if (this.isVisionModel) { + // modelOptions.max_tokens = 4000; + // } + + // /** @type {TAzureConfig | undefined} */ + // const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI]; + + // if ( + // (this.azure && this.isVisionModel && azureConfig) || + // (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI) + // ) { + // const { modelGroupMap, groupMap } = azureConfig; + // const { + // azureOptions, + // baseURL, + // headers = {}, + // serverless, + // } = mapModelToAzureConfig({ + // modelName: modelOptions.model, + // modelGroupMap, + // groupMap, + // }); + // opts.defaultHeaders = resolveHeaders(headers); + // this.langchainProxy = extractBaseURL(baseURL); + // this.apiKey = azureOptions.azureOpenAIApiKey; + + // const groupName = modelGroupMap[modelOptions.model].group; + // this.options.addParams = azureConfig.groupMap[groupName].addParams; + // this.options.dropParams = azureConfig.groupMap[groupName].dropParams; + // // Note: `forcePrompt` not re-assigned as only chat models are vision models + + // this.azure = !serverless && azureOptions; + // this.azureEndpoint = + 
// !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this); + // } + + // if (this.azure || this.options.azure) { + // /* Azure Bug, extremely short default `max_tokens` response */ + // if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') { + // modelOptions.max_tokens = 4000; + // } + + // /* Azure does not accept `model` in the body, so we need to remove it. */ + // delete modelOptions.model; + + // opts.baseURL = this.langchainProxy + // ? constructAzureURL({ + // baseURL: this.langchainProxy, + // azureOptions: this.azure, + // }) + // : this.azureEndpoint.split(/(? { + // delete modelOptions[param]; + // }); + // logger.debug('[api/server/controllers/agents/client.js #chatCompletion] dropped params', { + // dropParams: this.options.dropParams, + // modelOptions, + // }); + // } + + // const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE; + + const run = await createRun({ + agent: this.options.agent, + tools: this.options.tools, + toolMap: this.options.toolMap, + runId: this.responseMessageId, + modelOptions: this.modelOptions, + customHandlers: this.options.eventHandlers, + }); + + const config = { + configurable: { + provider: providerEndpointMap[this.options.agent.provider], + thread_id: this.conversationId, + }, + run_id: this.responseMessageId, + streamMode: 'values', + version: 'v2', + }; + + if (!run) { + throw new Error('Failed to create run'); + } + + const messages = formatAgentMessages(payload); + const runMessages = await run.processStream({ messages }, config, { + [Callback.TOOL_ERROR]: (graph, error, toolId) => { + logger.error( + '[api/server/controllers/agents/client.js #chatCompletion] Tool Error', + error, + toolId, + ); + }, + }); + // console.dir(runMessages, { depth: null }); + return runMessages; + } catch (err) { + logger.error( + '[api/server/controllers/agents/client.js #chatCompletion] Unhandled error type', + err, + ); + throw err; + } + } + + getEncoding() { + 
return this.modelOptions.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base'; + } + + /** + * Returns the token count of a given text. It also checks and resets the tokenizers if necessary. + * @param {string} text - The text to get the token count for. + * @returns {number} The token count of the given text. + */ + getTokenCount(text) { + const encoding = this.getEncoding(); + return Tokenizer.getTokenCount(text, encoding); + } +} + +module.exports = AgentClient; diff --git a/api/server/controllers/agents/demo.js b/api/server/controllers/agents/demo.js new file mode 100644 index 00000000000..c90745ba80d --- /dev/null +++ b/api/server/controllers/agents/demo.js @@ -0,0 +1,44 @@ +// Import the necessary modules +const path = require('path'); +const base = path.resolve(__dirname, '..', '..', '..', '..', 'api'); +console.log(base); +//api/server/controllers/agents/demo.js +require('module-alias')({ base }); +const connectDb = require('~/lib/db/connectDb'); +const AgentClient = require('./client'); + +// Define the user and message options +const user = 'user123'; +const parentMessageId = 'pmid123'; +const conversationId = 'cid456'; +const maxContextTokens = 200000; +const req = { + user: { id: user }, +}; +const progressOptions = { + res: {}, +}; + +// Define the message options +const messageOptions = { + user, + parentMessageId, + conversationId, + progressOptions, +}; + +async function main() { + await connectDb(); + const client = new AgentClient({ req, maxContextTokens }); + + const text = 'Hello, this is a test message.'; + + try { + let response = await client.sendMessage(text, messageOptions); + console.log('Response:', response); + } catch (error) { + console.error('Error sending message:', error); + } +} + +main(); diff --git a/api/server/controllers/agents/errors.js b/api/server/controllers/agents/errors.js new file mode 100644 index 00000000000..fb4de450852 --- /dev/null +++ b/api/server/controllers/agents/errors.js @@ -0,0 +1,153 @@ +// 
errorHandler.js +const { logger } = require('~/config'); +const getLogStores = require('~/cache/getLogStores'); +const { CacheKeys, ViolationTypes } = require('librechat-data-provider'); +const { recordUsage } = require('~/server/services/Threads'); +const { getConvo } = require('~/models/Conversation'); +const { sendResponse } = require('~/server/utils'); + +/** + * @typedef {Object} ErrorHandlerContext + * @property {OpenAIClient} openai - The OpenAI client + * @property {string} run_id - The run ID + * @property {boolean} completedRun - Whether the run has completed + * @property {string} assistant_id - The assistant ID + * @property {string} conversationId - The conversation ID + * @property {string} parentMessageId - The parent message ID + * @property {string} responseMessageId - The response message ID + * @property {string} endpoint - The endpoint being used + * @property {string} cacheKey - The cache key for the current request + */ + +/** + * @typedef {Object} ErrorHandlerDependencies + * @property {Express.Request} req - The Express request object + * @property {Express.Response} res - The Express response object + * @property {() => ErrorHandlerContext} getContext - Function to get the current context + * @property {string} [originPath] - The origin path for the error handler + */ + +/** + * Creates an error handler function with the given dependencies + * @param {ErrorHandlerDependencies} dependencies - The dependencies for the error handler + * @returns {(error: Error) => Promise} The error handler function + */ +const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/chat/' }) => { + const cache = getLogStores(CacheKeys.ABORT_KEYS); + + /** + * Handles errors that occur during the chat process + * @param {Error} error - The error that occurred + * @returns {Promise} + */ + return async (error) => { + const { + openai, + run_id, + endpoint, + cacheKey, + completedRun, + assistant_id, + conversationId, + parentMessageId, + 
responseMessageId, + } = getContext(); + + const defaultErrorMessage = + 'The Assistant run failed to initialize. Try sending a message in a new conversation.'; + const messageData = { + assistant_id, + conversationId, + parentMessageId, + sender: 'System', + user: req.user.id, + shouldSaveMessage: false, + messageId: responseMessageId, + endpoint, + }; + + if (error.message === 'Run cancelled') { + return res.end(); + } else if (error.message === 'Request closed' && completedRun) { + return; + } else if (error.message === 'Request closed') { + logger.debug(`[${originPath}] Request aborted on close`); + } else if (/Files.*are invalid/.test(error.message)) { + const errorMessage = `Files are invalid, or may not have uploaded yet.${ + endpoint === 'azureAssistants' + ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.' + : '' + }`; + return sendResponse(req, res, messageData, errorMessage); + } else if (error?.message?.includes('string too long')) { + return sendResponse( + req, + res, + messageData, + 'Message too long. The Assistants API has a limit of 32,768 characters per message. 
Please shorten it and try again.', + ); + } else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) { + return sendResponse(req, res, messageData, error.message); + } else { + logger.error(`[${originPath}]`, error); + } + + if (!openai || !run_id) { + return sendResponse(req, res, messageData, defaultErrorMessage); + } + + await new Promise((resolve) => setTimeout(resolve, 2000)); + + try { + const status = await cache.get(cacheKey); + if (status === 'cancelled') { + logger.debug(`[${originPath}] Run already cancelled`); + return res.end(); + } + await cache.delete(cacheKey); + // const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id); + // logger.debug(`[${originPath}] Cancelled run:`, cancelledRun); + } catch (error) { + logger.error(`[${originPath}] Error cancelling run`, error); + } + + await new Promise((resolve) => setTimeout(resolve, 2000)); + + let run; + try { + // run = await openai.beta.threads.runs.retrieve(thread_id, run_id); + await recordUsage({ + ...run.usage, + model: run.model, + user: req.user.id, + conversationId, + }); + } catch (error) { + logger.error(`[${originPath}] Error fetching or processing run`, error); + } + + let finalEvent; + try { + // const errorContentPart = { + // text: { + // value: + // error?.message ?? 'There was an error processing your request. 
Please try again later.', + // }, + // type: ContentTypes.ERROR, + // }; + + finalEvent = { + final: true, + conversation: await getConvo(req.user.id, conversationId), + // runMessages, + }; + } catch (error) { + logger.error(`[${originPath}] Error finalizing error process`, error); + return sendResponse(req, res, messageData, 'The Assistant run failed'); + } + + return sendResponse(req, res, finalEvent); + }; +}; + +module.exports = { createErrorHandler }; diff --git a/api/server/controllers/agents/llm.js b/api/server/controllers/agents/llm.js new file mode 100644 index 00000000000..438a38b6cbc --- /dev/null +++ b/api/server/controllers/agents/llm.js @@ -0,0 +1,106 @@ +const { HttpsProxyAgent } = require('https-proxy-agent'); +const { resolveHeaders } = require('librechat-data-provider'); +const { createLLM } = require('~/app/clients/llm'); + +/** + * Initializes and returns a Language Learning Model (LLM) instance. + * + * @param {Object} options - Configuration options for the LLM. + * @param {string} options.model - The model identifier. + * @param {string} options.modelName - The specific name of the model. + * @param {number} options.temperature - The temperature setting for the model. + * @param {number} options.presence_penalty - The presence penalty for the model. + * @param {number} options.frequency_penalty - The frequency penalty for the model. + * @param {number} options.max_tokens - The maximum number of tokens for the model output. + * @param {boolean} options.streaming - Whether to use streaming for the model output. + * @param {Object} options.context - The context for the conversation. + * @param {number} options.tokenBuffer - The token buffer size. + * @param {number} options.initialMessageCount - The initial message count. + * @param {string} options.conversationId - The ID of the conversation. + * @param {string} options.user - The user identifier. + * @param {string} options.langchainProxy - The langchain proxy URL. 
+ * @param {boolean} options.useOpenRouter - Whether to use OpenRouter. + * @param {Object} options.options - Additional options. + * @param {Object} options.options.headers - Custom headers for the request. + * @param {string} options.options.proxy - Proxy URL. + * @param {Object} options.options.req - The request object. + * @param {Object} options.options.res - The response object. + * @param {boolean} options.options.debug - Whether to enable debug mode. + * @param {string} options.apiKey - The API key for authentication. + * @param {Object} options.azure - Azure-specific configuration. + * @param {Object} options.abortController - The AbortController instance. + * @returns {Object} The initialized LLM instance. + */ +function initializeLLM(options) { + const { + model, + modelName, + temperature, + presence_penalty, + frequency_penalty, + max_tokens, + streaming, + user, + langchainProxy, + useOpenRouter, + options: { headers, proxy }, + apiKey, + azure, + } = options; + + const modelOptions = { + modelName: modelName || model, + temperature, + presence_penalty, + frequency_penalty, + user, + }; + + if (max_tokens) { + modelOptions.max_tokens = max_tokens; + } + + const configOptions = {}; + + if (langchainProxy) { + configOptions.basePath = langchainProxy; + } + + if (useOpenRouter) { + configOptions.basePath = 'https://openrouter.ai/api/v1'; + configOptions.baseOptions = { + headers: { + 'HTTP-Referer': 'https://librechat.ai', + 'X-Title': 'LibreChat', + }, + }; + } + + if (headers && typeof headers === 'object' && !Array.isArray(headers)) { + configOptions.baseOptions = { + headers: resolveHeaders({ + ...headers, + ...configOptions?.baseOptions?.headers, + }), + }; + } + + if (proxy) { + configOptions.httpAgent = new HttpsProxyAgent(proxy); + configOptions.httpsAgent = new HttpsProxyAgent(proxy); + } + + const llm = createLLM({ + modelOptions, + configOptions, + openAIApiKey: apiKey, + azure, + streaming, + }); + + return llm; +} + +module.exports = { + 
initializeLLM, +}; diff --git a/api/server/controllers/agents/request.js b/api/server/controllers/agents/request.js new file mode 100644 index 00000000000..6480205979b --- /dev/null +++ b/api/server/controllers/agents/request.js @@ -0,0 +1,150 @@ +const { Constants, getResponseSender } = require('librechat-data-provider'); +const { createAbortController, handleAbortError } = require('~/server/middleware'); +const { sendMessage } = require('~/server/utils'); +const { saveMessage } = require('~/models'); +const { logger } = require('~/config'); + +const AgentController = async (req, res, next, initializeClient, addTitle) => { + let { + text, + endpointOption, + conversationId, + modelDisplayLabel, + parentMessageId = null, + overrideParentMessageId = null, + } = req.body; + + let userMessage; + let userMessagePromise; + let promptTokens; + let userMessageId; + let responseMessageId; + + const sender = getResponseSender({ + ...endpointOption, + model: endpointOption.modelOptions.model, + modelDisplayLabel, + }); + const newConvo = !conversationId; + const user = req.user.id; + + const getReqData = (data = {}) => { + for (let key in data) { + if (key === 'userMessage') { + userMessage = data[key]; + userMessageId = data[key].messageId; + } else if (key === 'userMessagePromise') { + userMessagePromise = data[key]; + } else if (key === 'responseMessageId') { + responseMessageId = data[key]; + } else if (key === 'promptTokens') { + promptTokens = data[key]; + } else if (!conversationId && key === 'conversationId') { + conversationId = data[key]; + } + } + }; + + try { + const { client } = await initializeClient({ req, res, endpointOption }); + + const getAbortData = () => ({ + sender, + userMessage, + promptTokens, + conversationId, + userMessagePromise, + // text: getPartialText(), + messageId: responseMessageId, + parentMessageId: overrideParentMessageId ?? 
userMessageId, + }); + + const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData); + + res.on('close', () => { + logger.debug('[AgentController] Request closed'); + if (!abortController) { + return; + } else if (abortController.signal.aborted) { + return; + } else if (abortController.requestCompleted) { + return; + } + + abortController.abort(); + logger.debug('[AgentController] Request aborted on close'); + }); + + const messageOptions = { + user, + onStart, + getReqData, + conversationId, + parentMessageId, + abortController, + overrideParentMessageId, + progressOptions: { + res, + // parentMessageId: overrideParentMessageId || userMessageId, + }, + }; + + let response = await client.sendMessage(text, messageOptions); + + if (overrideParentMessageId) { + response.parentMessageId = overrideParentMessageId; + } + + response.endpoint = endpointOption.endpoint; + + const { conversation = {} } = await client.responsePromise; + conversation.title = + conversation && !conversation.title ? 
null : conversation?.title || 'New Chat'; + + if (client.options.attachments) { + userMessage.files = client.options.attachments; + conversation.model = endpointOption.modelOptions.model; + delete userMessage.image_urls; + } + + if (!abortController.signal.aborted) { + sendMessage(res, { + final: true, + conversation, + title: conversation.title, + requestMessage: userMessage, + responseMessage: response, + }); + res.end(); + + await saveMessage( + req, + { ...response, user }, + { context: 'api/server/controllers/agents/request.js - response end' }, + ); + } + + if (!client.skipSaveUserMessage) { + await saveMessage(req, userMessage, { + context: 'api/server/controllers/agents/request.js - don\'t skip saving user message', + }); + } + + if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) { + addTitle(req, { + text, + response, + client, + }); + } + } catch (error) { + handleAbortError(res, req, error, { + conversationId, + sender, + messageId: responseMessageId, + parentMessageId: userMessageId ?? parentMessageId, + }); + } +}; + +module.exports = AgentController; diff --git a/api/server/controllers/agents/run.js b/api/server/controllers/agents/run.js new file mode 100644 index 00000000000..d30d43bd9d3 --- /dev/null +++ b/api/server/controllers/agents/run.js @@ -0,0 +1,59 @@ +const { Run } = require('@librechat/agents'); +const { providerEndpointMap } = require('librechat-data-provider'); + +/** + * @typedef {import('@librechat/agents').t} t + * @typedef {import('@librechat/agents').StreamEventData} StreamEventData + * @typedef {import('@librechat/agents').ClientOptions} ClientOptions + * @typedef {import('@librechat/agents').EventHandler} EventHandler + * @typedef {import('@librechat/agents').GraphEvents} GraphEvents + * @typedef {import('@librechat/agents').IState} IState + */ + +/** + * Creates a new Run instance with custom handlers and configuration. + * + * @param {Object} options - The options for creating the Run instance. 
+ * @param {Agent} options.agent - The agent for this run. + * @param {StructuredTool[] | undefined} [options.tools] - The tools to use in the run. + * @param {Record | undefined} [options.toolMap] - The tool map for the run. + * @param {Record | undefined} [options.customHandlers] - Custom event handlers. + * @param {string | undefined} [options.runId] - Optional run ID; otherwise, a new run ID will be generated. + * @param {ClientOptions} [options.modelOptions] - Optional model to use; if not provided, it will use the default from modelMap. + * @param {boolean} [options.streaming=true] - Whether to use streaming. + * @param {boolean} [options.streamUsage=true] - Whether to stream usage information. + * @returns {Promise>} A promise that resolves to a new Run instance. + */ +async function createRun({ + runId, + tools, + agent, + toolMap, + modelOptions, + customHandlers, + streaming = true, + streamUsage = true, +}) { + const llmConfig = Object.assign( + { + provider: providerEndpointMap[agent.provider], + streaming, + streamUsage, + }, + modelOptions, + ); + + return Run.create({ + graphConfig: { + runId, + llmConfig, + tools, + toolMap, + instructions: agent.instructions, + additional_instructions: agent.additional_instructions, + }, + customHandlers, + }); +} + +module.exports = { createRun }; diff --git a/api/server/controllers/agents/v1.js b/api/server/controllers/agents/v1.js new file mode 100644 index 00000000000..2a9911c5416 --- /dev/null +++ b/api/server/controllers/agents/v1.js @@ -0,0 +1,208 @@ +const { nanoid } = require('nanoid'); +const { FileContext } = require('librechat-data-provider'); +const { + getAgent, + createAgent, + updateAgent, + deleteAgent, + getListAgents, +} = require('~/models/Agent'); +const { getStrategyFunctions } = require('~/server/services/Files/strategies'); +const { uploadImageBuffer } = require('~/server/services/Files/process'); +const { deleteFileByFilter } = require('~/models/File'); +const { logger } = 
require('~/config'); + +/** + * Creates an Agent. + * @route POST /Agents + * @param {ServerRequest} req - The request object. + * @param {AgentCreateParams} req.body - The request body. + * @param {ServerResponse} res - The response object. + * @returns {Agent} 201 - success response - application/json + */ +const createAgentHandler = async (req, res) => { + try { + const { tools = [], provider, name, description, instructions, model, ...agentData } = req.body; + const { id: userId } = req.user; + + agentData.tools = tools + .map((tool) => (typeof tool === 'string' ? req.app.locals.availableTools[tool] : tool)) + .filter(Boolean); + + Object.assign(agentData, { + author: userId, + name, + description, + instructions, + provider, + model, + }); + + agentData.id = `agent_${nanoid()}`; + const agent = await createAgent(agentData); + res.status(201).json(agent); + } catch (error) { + logger.error('[/Agents] Error creating agent', error); + res.status(500).json({ error: error.message }); + } +}; + +/** + * Retrieves an Agent by ID. + * @route GET /Agents/:id + * @param {object} req - Express Request + * @param {object} req.params - Request params + * @param {string} req.params.id - Agent identifier. + * @returns {Agent} 200 - success response - application/json + * @returns {Error} 404 - Agent not found + */ +const getAgentHandler = async (req, res) => { + try { + const id = req.params.id; + const agent = await getAgent({ id }); + if (!agent) { + return res.status(404).json({ error: 'Agent not found' }); + } + return res.status(200).json(agent); + } catch (error) { + logger.error('[/Agents/:id] Error retrieving agent', error); + res.status(500).json({ error: error.message }); + } +}; + +/** + * Updates an Agent. + * @route PATCH /Agents/:id + * @param {object} req - Express Request + * @param {object} req.params - Request params + * @param {string} req.params.id - Agent identifier. + * @param {AgentUpdateParams} req.body - The Agent update parameters. 
+ * @returns {Agent} 200 - success response - application/json + */ +const updateAgentHandler = async (req, res) => { + try { + const id = req.params.id; + const updatedAgent = await updateAgent({ id, author: req.user.id }, req.body); + return res.json(updatedAgent); + } catch (error) { + logger.error('[/Agents/:id] Error updating Agent', error); + res.status(500).json({ error: error.message }); + } +}; + +/** + * Deletes an Agent based on the provided ID. + * @route DELETE /Agents/:id + * @param {object} req - Express Request + * @param {object} req.params - Request params + * @param {string} req.params.id - Agent identifier. + * @returns {Agent} 200 - success response - application/json + */ +const deleteAgentHandler = async (req, res) => { + try { + const id = req.params.id; + const agent = await getAgent({ id }); + if (!agent) { + return res.status(404).json({ error: 'Agent not found' }); + } + await deleteAgent({ id, author: req.user.id }); + return res.json({ message: 'Agent deleted' }); + } catch (error) { + logger.error('[/Agents/:id] Error deleting Agent', error); + res.status(500).json({ error: error.message }); + } +}; + +/** + * + * @route GET /Agents + * @param {object} req - Express Request + * @param {object} req.query - Request query + * @param {string} [req.query.user] - The user ID of the agent's author. + * @returns {AgentListResponse} 200 - success response - application/json + */ +const getListAgentsHandler = async (req, res) => { + try { + const { user } = req.query; + const filter = user ? { author: user } : {}; + const data = await getListAgents(filter); + return res.json(data); + } catch (error) { + logger.error('[/Agents] Error listing Agents', error); + res.status(500).json({ error: error.message }); + } +}; + +/** + * Uploads and updates an avatar for a specific agent. 
+ * @route POST /avatar/:agent_id + * @param {object} req - Express Request + * @param {object} req.params - Request params + * @param {string} req.params.agent_id - The ID of the agent. + * @param {Express.Multer.File} req.file - The avatar image file. + * @param {object} req.body - Request body + * @param {string} [req.body.avatar] - Optional avatar for the agent's avatar. + * @returns {Object} 200 - success response - application/json + */ +const uploadAgentAvatarHandler = async (req, res) => { + try { + const { agent_id } = req.params; + if (!agent_id) { + return res.status(400).json({ message: 'Agent ID is required' }); + } + + let { avatar: _avatar = '{}' } = req.body; + + const image = await uploadImageBuffer({ + req, + context: FileContext.avatar, + metadata: { + buffer: req.file.buffer, + }, + }); + + try { + _avatar = JSON.parse(_avatar); + } catch (error) { + logger.error('[/avatar/:agent_id] Error parsing avatar', error); + _avatar = {}; + } + + if (_avatar && _avatar.source) { + const { deleteFile } = getStrategyFunctions(_avatar.source); + try { + await deleteFile(req, { filepath: _avatar.filepath }); + await deleteFileByFilter({ filepath: _avatar.filepath }); + } catch (error) { + logger.error('[/avatar/:agent_id] Error deleting old avatar', error); + } + } + + const promises = []; + + const data = { + avatar: { + filepath: image.filepath, + source: req.app.locals.fileStrategy, + }, + }; + + promises.push(await updateAgent({ id: agent_id, author: req.user.id }, data)); + + const resolved = await Promise.all(promises); + res.status(201).json(resolved[0]); + } catch (error) { + const message = 'An error occurred while updating the Agent Avatar'; + logger.error(message, error); + res.status(500).json({ message }); + } +}; + +module.exports = { + createAgent: createAgentHandler, + getAgent: getAgentHandler, + updateAgent: updateAgentHandler, + deleteAgent: deleteAgentHandler, + getListAgents: getListAgentsHandler, + uploadAgentAvatar: 
uploadAgentAvatarHandler, +}; diff --git a/api/server/index.js b/api/server/index.js index 3cb969ddc0f..3fa5778301c 100644 --- a/api/server/index.js +++ b/api/server/index.js @@ -105,6 +105,7 @@ const startServer = async () => { app.use('/images/', validateImageRequest, routes.staticRoute); app.use('/api/share', routes.share); app.use('/api/roles', routes.roles); + app.use('/api/agents', routes.agents); app.use('/api/tags', routes.tags); diff --git a/api/server/middleware/buildEndpointOption.js b/api/server/middleware/buildEndpointOption.js index 376daa2ac45..83e06d77c33 100644 --- a/api/server/middleware/buildEndpointOption.js +++ b/api/server/middleware/buildEndpointOption.js @@ -1,4 +1,4 @@ -const { parseCompactConvo, EModelEndpoint } = require('librechat-data-provider'); +const { parseCompactConvo, EModelEndpoint, isAgentsEndpoint } = require('librechat-data-provider'); const { getModelsConfig } = require('~/server/controllers/ModelController'); const azureAssistants = require('~/server/services/Endpoints/azureAssistants'); const assistants = require('~/server/services/Endpoints/assistants'); @@ -6,6 +6,7 @@ const gptPlugins = require('~/server/services/Endpoints/gptPlugins'); const { processFiles } = require('~/server/services/Files/process'); const anthropic = require('~/server/services/Endpoints/anthropic'); const openAI = require('~/server/services/Endpoints/openAI'); +const agents = require('~/server/services/Endpoints/agents'); const custom = require('~/server/services/Endpoints/custom'); const google = require('~/server/services/Endpoints/google'); const enforceModelSpec = require('./enforceModelSpec'); @@ -15,6 +16,7 @@ const buildFunction = { [EModelEndpoint.openAI]: openAI.buildOptions, [EModelEndpoint.google]: google.buildOptions, [EModelEndpoint.custom]: custom.buildOptions, + [EModelEndpoint.agents]: agents.buildOptions, [EModelEndpoint.azureOpenAI]: openAI.buildOptions, [EModelEndpoint.anthropic]: anthropic.buildOptions, 
[EModelEndpoint.gptPlugins]: gptPlugins.buildOptions, @@ -59,12 +61,13 @@ async function buildEndpointOption(req, res, next) { } } - req.body.endpointOption = buildFunction[endpointType ?? endpoint]( - endpoint, - parsedBody, - endpointType, - ); + const endpointFn = buildFunction[endpointType ?? endpoint]; + const builder = isAgentsEndpoint(endpoint) ? (...args) => endpointFn(req, ...args) : endpointFn; + // TODO: use object params + req.body.endpointOption = builder(endpoint, parsedBody, endpointType); + + // TODO: use `getModelsConfig` only when necessary const modelsConfig = await getModelsConfig(req); req.body.endpointOption.modelsConfig = modelsConfig; diff --git a/api/server/routes/agents/actions.js b/api/server/routes/agents/actions.js new file mode 100644 index 00000000000..e79f749fc13 --- /dev/null +++ b/api/server/routes/agents/actions.js @@ -0,0 +1,166 @@ +const express = require('express'); +const { nanoid } = require('nanoid'); +const { actionDelimiter } = require('librechat-data-provider'); +const { encryptMetadata, domainParser } = require('~/server/services/ActionService'); +const { updateAction, getActions, deleteAction } = require('~/models/Action'); +const { getAgent, updateAgent } = require('~/models/Agent'); +const { logger } = require('~/config'); + +const router = express.Router(); + +/** + * Retrieves all user's actions + * @route GET /actions/ + * @param {string} req.params.id - Assistant identifier. + * @returns {Action[]} 200 - success response - application/json + */ +router.get('/', async (req, res) => { + try { + res.json(await getActions({ user: req.user.id })); + } catch (error) { + res.status(500).json({ error: error.message }); + } +}); + +/** + * Adds or updates actions for a specific agent. + * @route POST /actions/:agent_id + * @param {string} req.params.agent_id - The ID of the agent. + * @param {FunctionTool[]} req.body.functions - The functions to be added or updated. 
+ * @param {string} [req.body.action_id] - Optional ID for the action. + * @param {ActionMetadata} req.body.metadata - Metadata for the action. + * @returns {Object} 200 - success response - application/json + */ +router.post('/:agent_id', async (req, res) => { + try { + const { agent_id } = req.params; + + /** @type {{ functions: FunctionTool[], action_id: string, metadata: ActionMetadata }} */ + const { functions, action_id: _action_id, metadata: _metadata } = req.body; + if (!functions.length) { + return res.status(400).json({ message: 'No functions provided' }); + } + + let metadata = encryptMetadata(_metadata); + + let { domain } = metadata; + domain = await domainParser(req, domain, true); + + if (!domain) { + return res.status(400).json({ message: 'No domain provided' }); + } + + const action_id = _action_id ?? nanoid(); + const initialPromises = []; + + // TODO: share agents + initialPromises.push(getAgent({ id: agent_id, author: req.user.id })); + if (_action_id) { + initialPromises.push(getActions({ action_id }, true)); + } + + /** @type {[Agent, [Action|undefined]]} */ + const [agent, actions_result] = await Promise.all(initialPromises); + if (!agent) { + return res.status(404).json({ message: 'Agent not found for adding action' }); + } + + if (actions_result && actions_result.length) { + const action = actions_result[0]; + metadata = { ...action.metadata, ...metadata }; + } + + const { actions: _actions = [] } = agent ?? 
{}; + const actions = []; + for (const action of _actions) { + const [_action_domain, current_action_id] = action.split(actionDelimiter); + if (current_action_id === action_id) { + continue; + } + + actions.push(action); + } + + actions.push(`${domain}${actionDelimiter}${action_id}`); + + /** @type {string[]}} */ + const { tools: _tools = [] } = agent; + + const tools = _tools + .filter((tool) => !(tool && (tool.includes(domain) || tool.includes(action_id)))) + .concat(functions.map((tool) => `${tool.function.name}${actionDelimiter}${domain}`)); + + const updatedAgent = await updateAgent( + { id: agent_id, author: req.user.id }, + { tools, actions }, + ); + /** @type {[Action]} */ + const updatedAction = await updateAction( + { action_id }, + { metadata, agent_id, user: req.user.id }, + ); + + const sensitiveFields = ['api_key', 'oauth_client_id', 'oauth_client_secret']; + for (let field of sensitiveFields) { + if (updatedAction.metadata[field]) { + delete updatedAction.metadata[field]; + } + } + + res.json([updatedAgent, updatedAction]); + } catch (error) { + const message = 'Trouble updating the Agent Action'; + logger.error(message, error); + res.status(500).json({ message }); + } +}); + +/** + * Deletes an action for a specific agent. + * @route DELETE /actions/:agent_id/:action_id + * @param {string} req.params.agent_id - The ID of the agent. + * @param {string} req.params.action_id - The ID of the action to delete. 
+ * @returns {Object} 200 - success response - application/json + */ +router.delete('/:agent_id/:action_id', async (req, res) => { + try { + const { agent_id, action_id } = req.params; + + const agent = await getAgent({ id: agent_id, author: req.user.id }); + if (!agent) { + return res.status(404).json({ message: 'Agent not found for deleting action' }); + } + + const { tools = [], actions = [] } = agent; + + let domain = ''; + const updatedActions = actions.filter((action) => { + if (action.includes(action_id)) { + [domain] = action.split(actionDelimiter); + return false; + } + return true; + }); + + domain = await domainParser(req, domain, true); + + if (!domain) { + return res.status(400).json({ message: 'No domain provided' }); + } + + const updatedTools = tools.filter((tool) => !(tool && tool.includes(domain))); + + await updateAgent( + { id: agent_id, author: req.user.id }, + { tools: updatedTools, actions: updatedActions }, + ); + await deleteAction({ action_id }); + res.status(200).json({ message: 'Action deleted successfully' }); + } catch (error) { + const message = 'Trouble deleting the Agent Action'; + logger.error(message, error); + res.status(500).json({ message }); + } +}); + +module.exports = router; diff --git a/api/server/routes/agents/chat.js b/api/server/routes/agents/chat.js new file mode 100644 index 00000000000..353c2fc7b1f --- /dev/null +++ b/api/server/routes/agents/chat.js @@ -0,0 +1,35 @@ +const express = require('express'); + +const router = express.Router(); +const { + setHeaders, + handleAbort, + // validateModel, + // validateEndpoint, + buildEndpointOption, +} = require('~/server/middleware'); +const { initializeClient } = require('~/server/services/Endpoints/agents'); +const AgentController = require('~/server/controllers/agents/request'); + +router.post('/abort', handleAbort()); + +/** + * @route POST / + * @desc Chat with an assistant + * @access Public + * @param {express.Request} req - The request object, containing the request 
data. + * @param {express.Response} res - The response object, used to send back a response. + * @returns {void} + */ +router.post( + '/', + // validateModel, + // validateEndpoint, + buildEndpointOption, + setHeaders, + async (req, res, next) => { + await AgentController(req, res, next, initializeClient); + }, +); + +module.exports = router; diff --git a/api/server/routes/agents/index.js b/api/server/routes/agents/index.js new file mode 100644 index 00000000000..aa15400fe6c --- /dev/null +++ b/api/server/routes/agents/index.js @@ -0,0 +1,21 @@ +const express = require('express'); +const router = express.Router(); +const { + uaParser, + checkBan, + requireJwtAuth, + // concurrentLimiter, + // messageIpLimiter, + // messageUserLimiter, +} = require('~/server/middleware'); + +const v1 = require('./v1'); +const chat = require('./chat'); + +router.use(requireJwtAuth); +router.use(checkBan); +router.use(uaParser); +router.use('/', v1); +router.use('/chat', chat); + +module.exports = router; diff --git a/api/server/routes/agents/v1.js b/api/server/routes/agents/v1.js new file mode 100644 index 00000000000..1001873fe41 --- /dev/null +++ b/api/server/routes/agents/v1.js @@ -0,0 +1,77 @@ +const multer = require('multer'); +const express = require('express'); +const v1 = require('~/server/controllers/agents/v1'); +const actions = require('./actions'); + +const upload = multer(); +const router = express.Router(); + +/** + * Agent actions route. + * @route GET|POST /agents/actions + */ +router.use('/actions', actions); + +/** + * Get a list of available tools for agents. + * @route GET /agents/tools + * @returns {TPlugin[]} 200 - application/json + */ +router.use('/tools', (req, res) => { + res.json([]); +}); + +/** + * Creates an agent. + * @route POST /agents + * @param {AgentCreateParams} req.body - The agent creation parameters. + * @returns {Agent} 201 - Success response - application/json + */ +router.post('/', v1.createAgent); + +/** + * Retrieves an agent. 
+ * @route GET /agents/:id + * @param {string} req.params.id - Agent identifier. + * @returns {Agent} 200 - Success response - application/json + */ +router.get('/:id', v1.getAgent); + +/** + * Updates an agent. + * @route PATCH /agents/:id + * @param {string} req.params.id - Agent identifier. + * @param {AgentUpdateParams} req.body - The agent update parameters. + * @returns {Agent} 200 - Success response - application/json + */ +router.patch('/:id', v1.updateAgent); + +/** + * Deletes an agent. + * @route DELETE /agents/:id + * @param {string} req.params.id - Agent identifier. + * @returns {Agent} 200 - success response - application/json + */ +router.delete('/:id', v1.deleteAgent); + +/** + * Returns a list of agents. + * @route GET /agents + * @param {AgentListParams} req.query - The agent list parameters for pagination and sorting. + * @returns {AgentListResponse} 200 - success response - application/json + */ +router.get('/', v1.getListAgents); + +// TODO: handle private agents + +/** + * Uploads and updates an avatar for a specific agent. + * @route POST /avatar/:agent_id + * @param {string} req.params.agent_id - The ID of the agent. + * @param {Express.Multer.File} req.file - The avatar image file. + * @param {string} [req.body.metadata] - Optional metadata for the agent's avatar. 
+ * @returns {Object} 200 - success response - application/json + */ +router.post('/avatar/:agent_id', upload.single('file'), v1.uploadAgentAvatar); + +module.exports = router; diff --git a/api/server/routes/assistants/actions.js b/api/server/routes/assistants/actions.js index b780636c314..1646ac0a965 100644 --- a/api/server/routes/assistants/actions.js +++ b/api/server/routes/assistants/actions.js @@ -1,5 +1,5 @@ -const { v4 } = require('uuid'); const express = require('express'); +const { nanoid } = require('nanoid'); const { encryptMetadata, domainParser } = require('~/server/services/ActionService'); const { actionDelimiter, EModelEndpoint } = require('librechat-data-provider'); const { getOpenAIClient } = require('~/server/controllers/assistants/helpers'); @@ -9,20 +9,6 @@ const { logger } = require('~/config'); const router = express.Router(); -/** - * Retrieves all user's actions - * @route GET /actions/ - * @param {string} req.params.id - Assistant identifier. - * @returns {Action[]} 200 - success response - application/json - */ -router.get('/', async (req, res) => { - try { - res.json(await getActions()); - } catch (error) { - res.status(500).json({ error: error.message }); - } -}); - /** * Adds or updates actions for a specific assistant. * @route POST /actions/:assistant_id @@ -51,7 +37,7 @@ router.post('/:assistant_id', async (req, res) => { return res.status(400).json({ message: 'No domain provided' }); } - const action_id = _action_id ?? v4(); + const action_id = _action_id ?? 
nanoid(); const initialPromises = []; const { openai } = await getOpenAIClient({ req, res }); @@ -178,6 +164,10 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => { domain = await domainParser(req, domain, true); + if (!domain) { + return res.status(400).json({ message: 'No domain provided' }); + } + const updatedTools = tools.filter( (tool) => !(tool.function && tool.function.name.includes(domain)), ); diff --git a/api/server/routes/index.js b/api/server/routes/index.js index 76ab423f012..90ba5c73add 100644 --- a/api/server/routes/index.js +++ b/api/server/routes/index.js @@ -1,51 +1,53 @@ -const ask = require('./ask'); -const edit = require('./edit'); +const assistants = require('./assistants'); +const categories = require('./categories'); +const tokenizer = require('./tokenizer'); +const endpoints = require('./endpoints'); +const staticRoute = require('./static'); const messages = require('./messages'); -const convos = require('./convos'); const presets = require('./presets'); const prompts = require('./prompts'); -const search = require('./search'); -const tokenizer = require('./tokenizer'); -const auth = require('./auth'); -const keys = require('./keys'); -const oauth = require('./oauth'); -const endpoints = require('./endpoints'); const balance = require('./balance'); -const models = require('./models'); const plugins = require('./plugins'); -const user = require('./user'); +const search = require('./search'); +const models = require('./models'); +const convos = require('./convos'); const config = require('./config'); -const assistants = require('./assistants'); +const agents = require('./agents'); +const roles = require('./roles'); +const oauth = require('./oauth'); const files = require('./files'); -const staticRoute = require('./static'); const share = require('./share'); -const categories = require('./categories'); -const roles = require('./roles'); const tags = require('./tags'); +const auth = require('./auth'); +const edit = 
require('./edit'); +const keys = require('./keys'); +const user = require('./user'); +const ask = require('./ask'); module.exports = { - search, ask, edit, - messages, - convos, - presets, - prompts, auth, keys, - oauth, user, - tokenizer, - endpoints, - balance, + tags, + roles, + oauth, + files, + share, + agents, + convos, + search, + prompts, + config, models, plugins, - config, + presets, + balance, + messages, + endpoints, + tokenizer, assistants, - files, - staticRoute, - share, categories, - roles, - tags, + staticRoute, }; diff --git a/api/server/services/ActionService.js b/api/server/services/ActionService.js index ff8fe5ac5b0..04a9b9829d1 100644 --- a/api/server/services/ActionService.js +++ b/api/server/services/ActionService.js @@ -6,6 +6,7 @@ const { isImageVisionTool, actionDomainSeparator, } = require('librechat-data-provider'); +const { tool } = require('@langchain/core/tools'); const { encryptV2, decryptV2 } = require('~/server/utils/crypto'); const { getActions, deleteActions } = require('~/models/Action'); const { deleteAssistant } = require('~/models/Assistant'); @@ -101,7 +102,8 @@ async function domainParser(req, domain, inverse = false) { * * @param {Object} searchParams - The parameters for loading action sets. * @param {string} searchParams.user - The user identifier. - * @param {string} searchParams.assistant_id - The assistant identifier. + * @param {string} [searchParams.agent_id]- The agent identifier. + * @param {string} [searchParams.assistant_id]- The assistant identifier. * @returns {Promise} A promise that resolves to an array of actions or `null` if no match. */ async function loadActionSets(searchParams) { @@ -114,10 +116,14 @@ async function loadActionSets(searchParams) { * @param {Object} params - The parameters for loading action sets. * @param {Action} params.action - The action set. Necessary for decrypting authentication values. 
 * @param {ActionRequest} params.requestBuilder - The ActionRequest builder class to execute the API call.
- * @returns { { _call: (toolInput: Object) => unknown} } An object with `_call` method to execute the tool input.
+ * @param {string | undefined} [params.name] - The name of the tool.
+ * @param {string | undefined} [params.description] - The description for the tool.
+ * @param {import('zod').ZodTypeAny | undefined} [params.zodSchema] - The Zod schema for tool input validation/definition
+ * @returns { Promise<{ _call: (toolInput: Object | string) => unknown }> } An object with `_call` method to execute the tool input.
  */
-async function createActionTool({ action, requestBuilder }) {
+async function createActionTool({ action, requestBuilder, zodSchema, name, description }) {
   action.metadata = await decryptMetadata(action.metadata);
+  /** @type {(toolInput: Object | string) => Promise<unknown>} */
   const _call = async (toolInput) => {
     try {
       requestBuilder.setParams(toolInput);
@@ -142,6 +148,14 @@ async function createActionTool({ action, requestBuilder }) {
     }
   };
 
+  if (name) {
+    return tool(_call, {
+      name,
+      description: description || '',
+      schema: zodSchema,
+    });
+  }
+
   return {
     _call,
   };
@@ -180,7 +194,7 @@
  * Decrypts sensitive metadata values for an action.
  *
  * @param {ActionMetadata} metadata - The action metadata to decrypt.
- * @returns {ActionMetadata} The updated action metadata with decrypted values.
+ * @returns {Promise<ActionMetadata>} The updated action metadata with decrypted values.
*/ async function decryptMetadata(metadata) { const decryptedMetadata = { ...metadata }; diff --git a/api/server/services/Config/EndpointService.js b/api/server/services/Config/EndpointService.js index 438cb81e80a..b2f82f383be 100644 --- a/api/server/services/Config/EndpointService.js +++ b/api/server/services/Config/EndpointService.js @@ -45,5 +45,7 @@ module.exports = { AZURE_ASSISTANTS_BASE_URL, EModelEndpoint.azureAssistants, ), + /* key will be part of separate config */ + [EModelEndpoint.agents]: generateConfig(process.env.I_AM_A_TEAPOT), }, }; diff --git a/api/server/services/Config/loadDefaultEConfig.js b/api/server/services/Config/loadDefaultEConfig.js index 379bd425015..df331d92fb0 100644 --- a/api/server/services/Config/loadDefaultEConfig.js +++ b/api/server/services/Config/loadDefaultEConfig.js @@ -9,13 +9,22 @@ const { config } = require('./EndpointService'); */ async function loadDefaultEndpointsConfig(req) { const { google, gptPlugins } = await loadAsyncEndpoints(req); - const { openAI, assistants, azureAssistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = - config; + const { + openAI, + agents, + assistants, + azureAssistants, + bingAI, + anthropic, + azureOpenAI, + chatGPTBrowser, + } = config; const enabledEndpoints = getEnabledEndpoints(); const endpointConfig = { [EModelEndpoint.openAI]: openAI, + [EModelEndpoint.agents]: agents, [EModelEndpoint.assistants]: assistants, [EModelEndpoint.azureAssistants]: azureAssistants, [EModelEndpoint.azureOpenAI]: azureOpenAI, diff --git a/api/server/services/Config/loadDefaultModels.js b/api/server/services/Config/loadDefaultModels.js index c550fbebbdd..e06b73c0c0a 100644 --- a/api/server/services/Config/loadDefaultModels.js +++ b/api/server/services/Config/loadDefaultModels.js @@ -29,6 +29,7 @@ async function loadDefaultModels(req) { return { [EModelEndpoint.openAI]: openAI, + [EModelEndpoint.agents]: openAI, [EModelEndpoint.google]: google, [EModelEndpoint.anthropic]: anthropic, 
[EModelEndpoint.gptPlugins]: gptPlugins, diff --git a/api/server/services/Endpoints/agents/build.js b/api/server/services/Endpoints/agents/build.js new file mode 100644 index 00000000000..256901057de --- /dev/null +++ b/api/server/services/Endpoints/agents/build.js @@ -0,0 +1,30 @@ +const { getAgent } = require('~/models/Agent'); +const { logger } = require('~/config'); + +const buildOptions = (req, endpoint, parsedBody) => { + const { agent_id, instructions, spec, ...rest } = parsedBody; + + const agentPromise = getAgent({ + id: agent_id, + // TODO: better author handling + author: req.user.id, + }).catch((error) => { + logger.error(`[/agents/:${agent_id}] Error retrieving agent during build options step`, error); + return undefined; + }); + + const endpointOption = { + agent: agentPromise, + endpoint, + agent_id, + instructions, + spec, + modelOptions: { + ...rest, + }, + }; + + return endpointOption; +}; + +module.exports = { buildOptions }; diff --git a/api/server/services/Endpoints/agents/index.js b/api/server/services/Endpoints/agents/index.js new file mode 100644 index 00000000000..8989f7df8c6 --- /dev/null +++ b/api/server/services/Endpoints/agents/index.js @@ -0,0 +1,7 @@ +const build = require('./build'); +const initialize = require('./initialize'); + +module.exports = { + ...build, + ...initialize, +}; diff --git a/api/server/services/Endpoints/agents/initialize.js b/api/server/services/Endpoints/agents/initialize.js new file mode 100644 index 00000000000..8627775ce5d --- /dev/null +++ b/api/server/services/Endpoints/agents/initialize.js @@ -0,0 +1,119 @@ +// const { +// ErrorTypes, +// EModelEndpoint, +// resolveHeaders, +// mapModelToAzureConfig, +// } = require('librechat-data-provider'); +// const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); +// const { isEnabled, isUserProvided } = require('~/server/utils'); +// const { getAzureCredentials } = require('~/utils'); +// const { OpenAIClient } = require('~/app'); 
+ +const { z } = require('zod'); +const { tool } = require('@langchain/core/tools'); +const { EModelEndpoint, providerEndpointMap } = require('librechat-data-provider'); +const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks'); +// for testing purposes +// const createTavilySearchTool = require('~/app/clients/tools/structured/TavilySearch'); +const initAnthropic = require('~/server/services/Endpoints/anthropic/initializeClient'); +const initOpenAI = require('~/server/services/Endpoints/openAI/initializeClient'); +const { loadAgentTools } = require('~/server/services/ToolService'); +const AgentClient = require('~/server/controllers/agents/client'); +const { getModelMaxTokens } = require('~/utils'); + +/* For testing errors */ +const _getWeather = tool( + async ({ location }) => { + if (location === 'SAN FRANCISCO') { + return 'It\'s 60 degrees and foggy'; + } else if (location.toLowerCase() === 'san francisco') { + throw new Error('Input queries must be all capitals'); + } else { + throw new Error('Invalid input.'); + } + }, + { + name: 'get_weather', + description: 'Call to get the current weather', + schema: z.object({ + location: z.string(), + }), + }, +); + +const providerConfigMap = { + [EModelEndpoint.openAI]: initOpenAI, + [EModelEndpoint.azureOpenAI]: initOpenAI, + [EModelEndpoint.anthropic]: initAnthropic, +}; + +const initializeClient = async ({ req, res, endpointOption }) => { + if (!endpointOption) { + throw new Error('Endpoint option not provided'); + } + + // TODO: use endpointOption to determine options/modelOptions + const eventHandlers = getDefaultHandlers({ res }); + + // const tools = [createTavilySearchTool()]; + // const tools = [_getWeather]; + // const tool_calls = [{ name: 'getPeople_action_swapi---dev' }]; + // const tool_calls = [{ name: 'dalle' }]; + // const tool_calls = [{ name: 'getItmOptions_action_YWlhcGkzLn' }]; + // const tool_calls = [{ name: 'tavily_search_results_json' }]; + // const tool_calls = [ + // { 
name: 'searchListings_action_emlsbG93NT' }, + // { name: 'searchAddress_action_emlsbG93NT' }, + // { name: 'searchMLS_action_emlsbG93NT' }, + // { name: 'searchCoordinates_action_emlsbG93NT' }, + // { name: 'searchUrl_action_emlsbG93NT' }, + // { name: 'getPropertyDetails_action_emlsbG93NT' }, + // ]; + + if (!endpointOption.agent) { + throw new Error('No agent promise provided'); + } + + /** @type {Agent} */ + const agent = await endpointOption.agent; + const { tools, toolMap } = await loadAgentTools({ + req, + tools: agent.tools, + agent_id: agent.id, + // openAIApiKey: process.env.OPENAI_API_KEY, + }); + + let modelOptions = { model: agent.model }; + const getOptions = providerConfigMap[agent.provider]; + if (!getOptions) { + throw new Error(`Provider ${agent.provider} not supported`); + } + + // TODO: pass-in override settings that are specific to current run + endpointOption.modelOptions.model = agent.model; + const options = await getOptions({ + req, + res, + endpointOption, + optionsOnly: true, + overrideEndpoint: agent.provider, + overrideModel: agent.model, + }); + modelOptions = Object.assign(modelOptions, options.llmConfig); + + const client = new AgentClient({ + req, + agent, + tools, + toolMap, + modelOptions, + eventHandlers, + configOptions: options.configOptions, + maxContextTokens: + agent.max_context_tokens ?? 
+ getModelMaxTokens(modelOptions.model, providerEndpointMap[agent.provider]), + }); + return { client }; +}; + +module.exports = { initializeClient }; diff --git a/api/server/services/Endpoints/anthropic/initializeClient.js b/api/server/services/Endpoints/anthropic/initializeClient.js index 42b902b1fc4..2ca68d14cf9 100644 --- a/api/server/services/Endpoints/anthropic/initializeClient.js +++ b/api/server/services/Endpoints/anthropic/initializeClient.js @@ -1,8 +1,9 @@ const { EModelEndpoint } = require('librechat-data-provider'); const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService'); +const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm'); const { AnthropicClient } = require('~/app'); -const initializeClient = async ({ req, res, endpointOption }) => { +const initializeClient = async ({ req, res, endpointOption, optionsOnly }) => { const { ANTHROPIC_API_KEY, ANTHROPIC_REVERSE_PROXY, PROXY } = process.env; const expiresAt = req.body.key; const isUserProvided = ANTHROPIC_API_KEY === 'user_provided'; @@ -34,6 +35,18 @@ const initializeClient = async ({ req, res, endpointOption }) => { clientOptions.streamRate = allConfig.streamRate; } + if (optionsOnly) { + const requestOptions = Object.assign( + { + reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null, + proxy: PROXY ?? 
null, + modelOptions: endpointOption.modelOptions, + }, + clientOptions, + ); + return getLLMConfig(anthropicApiKey, requestOptions); + } + const client = new AnthropicClient(anthropicApiKey, { req, res, diff --git a/api/server/services/Endpoints/anthropic/llm.js b/api/server/services/Endpoints/anthropic/llm.js new file mode 100644 index 00000000000..937d66e9264 --- /dev/null +++ b/api/server/services/Endpoints/anthropic/llm.js @@ -0,0 +1,55 @@ +const { HttpsProxyAgent } = require('https-proxy-agent'); +const { anthropicSettings, removeNullishValues } = require('librechat-data-provider'); + +/** + * Generates configuration options for creating an Anthropic language model (LLM) instance. + * + * @param {string} apiKey - The API key for authentication with Anthropic. + * @param {Object} [options={}] - Additional options for configuring the LLM. + * @param {Object} [options.modelOptions] - Model-specific options. + * @param {string} [options.modelOptions.model] - The name of the model to use. + * @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate. + * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation. + * @param {number} [options.modelOptions.topP] - Controls diversity of output generation. + * @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider. + * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens. + * @param {boolean} [options.modelOptions.stream] - Whether to stream the response. + * @param {string} [options.proxy] - Proxy server URL. + * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used. + * + * @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed. 
+ */ +function getLLMConfig(apiKey, options = {}) { + const defaultOptions = { + model: anthropicSettings.model.default, + maxOutputTokens: anthropicSettings.maxOutputTokens.default, + stream: true, + }; + + const mergedOptions = Object.assign(defaultOptions, options.modelOptions); + + const requestOptions = { + apiKey, + model: mergedOptions.model, + stream: mergedOptions.stream, + temperature: mergedOptions.temperature, + top_p: mergedOptions.topP, + top_k: mergedOptions.topK, + stop_sequences: mergedOptions.stop, + max_tokens: + mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model), + }; + + const configOptions = {}; + if (options.proxy) { + configOptions.httpAgent = new HttpsProxyAgent(options.proxy); + } + + if (options.reverseProxyUrl) { + configOptions.baseURL = options.reverseProxyUrl; + } + + return { llmConfig: removeNullishValues(requestOptions), configOptions }; +} + +module.exports = { getLLMConfig }; diff --git a/api/server/services/Endpoints/openAI/initializeClient.js b/api/server/services/Endpoints/openAI/initializeClient.js index 1518cba0283..b72b3d32c44 100644 --- a/api/server/services/Endpoints/openAI/initializeClient.js +++ b/api/server/services/Endpoints/openAI/initializeClient.js @@ -5,11 +5,19 @@ const { mapModelToAzureConfig, } = require('librechat-data-provider'); const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService'); +const { getLLMConfig } = require('~/server/services/Endpoints/openAI/llm'); const { isEnabled, isUserProvided } = require('~/server/utils'); const { getAzureCredentials } = require('~/utils'); const { OpenAIClient } = require('~/app'); -const initializeClient = async ({ req, res, endpointOption }) => { +const initializeClient = async ({ + req, + res, + endpointOption, + optionsOnly, + overrideEndpoint, + overrideModel, +}) => { const { PROXY, OPENAI_API_KEY, @@ -19,7 +27,9 @@ const initializeClient = async ({ req, res, endpointOption }) => { 
OPENAI_SUMMARIZE, DEBUG_OPENAI, } = process.env; - const { key: expiresAt, endpoint, model: modelName } = req.body; + const { key: expiresAt } = req.body; + const modelName = overrideModel ?? req.body.model; + const endpoint = overrideEndpoint ?? req.body.endpoint; const contextStrategy = isEnabled(OPENAI_SUMMARIZE) ? 'summarize' : null; const credentials = { @@ -45,12 +55,10 @@ const initializeClient = async ({ req, res, endpointOption }) => { let baseURL = userProvidesURL ? userValues?.baseURL : baseURLOptions[endpoint]; const clientOptions = { - debug: isEnabled(DEBUG_OPENAI), contextStrategy, - reverseProxyUrl: baseURL ? baseURL : null, proxy: PROXY ?? null, - req, - res, + debug: isEnabled(DEBUG_OPENAI), + reverseProxyUrl: baseURL ? baseURL : null, ...endpointOption, }; @@ -119,7 +127,17 @@ const initializeClient = async ({ req, res, endpointOption }) => { throw new Error(`${endpoint} API Key not provided.`); } - const client = new OpenAIClient(apiKey, clientOptions); + if (optionsOnly) { + const requestOptions = Object.assign( + { + modelOptions: endpointOption.modelOptions, + }, + clientOptions, + ); + return getLLMConfig(apiKey, requestOptions); + } + + const client = new OpenAIClient(apiKey, Object.assign({ req, res }, clientOptions)); return { client, openAIApiKey: apiKey, diff --git a/api/server/services/Endpoints/openAI/llm.js b/api/server/services/Endpoints/openAI/llm.js new file mode 100644 index 00000000000..3817224a4ba --- /dev/null +++ b/api/server/services/Endpoints/openAI/llm.js @@ -0,0 +1,120 @@ +const { HttpsProxyAgent } = require('https-proxy-agent'); +const { sanitizeModelName, constructAzureURL } = require('~/utils'); +const { isEnabled } = require('~/server/utils'); + +/** + * Generates configuration options for creating a language model (LLM) instance. + * @param {string} apiKey - The API key for authentication. + * @param {Object} options - Additional options for configuring the LLM. 
+ * @param {Object} [options.modelOptions] - Model-specific options. + * @param {string} [options.modelOptions.model] - The name of the model to use. + * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation (0-2). + * @param {number} [options.modelOptions.top_p] - Controls diversity via nucleus sampling (0-1). + * @param {number} [options.modelOptions.frequency_penalty] - Reduces repetition of token sequences (-2 to 2). + * @param {number} [options.modelOptions.presence_penalty] - Encourages discussing new topics (-2 to 2). + * @param {number} [options.modelOptions.max_tokens] - The maximum number of tokens to generate. + * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens. + * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used. + * @param {boolean} [options.useOpenRouter] - Flag to use OpenRouter API. + * @param {Object} [options.headers] - Additional headers for API requests. + * @param {string} [options.proxy] - Proxy server URL. + * @param {Object} [options.azure] - Azure-specific configurations. + * @param {boolean} [options.streaming] - Whether to use streaming mode. + * @param {Object} [options.addParams] - Additional parameters to add to the model options. + * @param {string[]} [options.dropParams] - Parameters to remove from the model options. + * @returns {Object} Configuration options for creating an LLM instance. 
+ */ +function getLLMConfig(apiKey, options = {}) { + const { + modelOptions = {}, + reverseProxyUrl, + useOpenRouter, + headers, + proxy, + azure, + streaming = true, + addParams, + dropParams, + } = options; + + let llmConfig = { + model: 'gpt-4o-mini', + streaming, + }; + + Object.assign(llmConfig, modelOptions); + + if (addParams && typeof addParams === 'object') { + Object.assign(llmConfig, addParams); + } + + if (dropParams && Array.isArray(dropParams)) { + dropParams.forEach((param) => { + delete llmConfig[param]; + }); + } + + const configOptions = {}; + + // Handle OpenRouter or custom reverse proxy + if (useOpenRouter || reverseProxyUrl === 'https://openrouter.ai/api/v1') { + configOptions.basePath = 'https://openrouter.ai/api/v1'; + configOptions.baseOptions = { + headers: Object.assign( + { + 'HTTP-Referer': 'https://librechat.ai', + 'X-Title': 'LibreChat', + }, + headers, + ), + }; + } else if (reverseProxyUrl) { + configOptions.basePath = reverseProxyUrl; + if (headers) { + configOptions.baseOptions = { headers }; + } + } + + if (proxy) { + const proxyAgent = new HttpsProxyAgent(proxy); + Object.assign(configOptions, { + httpAgent: proxyAgent, + httpsAgent: proxyAgent, + }); + } + + if (azure) { + const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME); + azure.azureOpenAIApiDeploymentName = useModelName + ? 
sanitizeModelName(llmConfig.model) + : azure.azureOpenAIApiDeploymentName; + + if (process.env.AZURE_OPENAI_DEFAULT_MODEL) { + llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL; + } + + if (configOptions.basePath) { + const azureURL = constructAzureURL({ + baseURL: configOptions.basePath, + azureOptions: azure, + }); + azure.azureOpenAIBasePath = azureURL.split(`/${azure.azureOpenAIApiDeploymentName}`)[0]; + } + + Object.assign(llmConfig, azure); + llmConfig.model = llmConfig.azureOpenAIApiDeploymentName; + } else { + llmConfig.openAIApiKey = apiKey; + // Object.assign(llmConfig, { + // configuration: { apiKey }, + // }); + } + + if (process.env.OPENAI_ORGANIZATION && this.azure) { + llmConfig.organization = process.env.OPENAI_ORGANIZATION; + } + + return { llmConfig, configOptions }; +} + +module.exports = { getLLMConfig }; diff --git a/api/server/services/Tokenizer.js b/api/server/services/Tokenizer.js new file mode 100644 index 00000000000..b88d5f8856d --- /dev/null +++ b/api/server/services/Tokenizer.js @@ -0,0 +1,64 @@ +const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken'); +const { logger } = require('~/config'); + +class Tokenizer { + constructor() { + this.tokenizersCache = {}; + this.tokenizerCallsCount = 0; + } + + getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) { + let tokenizer; + if (this.tokenizersCache[encoding]) { + tokenizer = this.tokenizersCache[encoding]; + } else { + if (isModelName) { + tokenizer = encodingForModel(encoding, extendSpecialTokens); + } else { + tokenizer = getEncoding(encoding, extendSpecialTokens); + } + this.tokenizersCache[encoding] = tokenizer; + } + return tokenizer; + } + + freeAndResetAllEncoders() { + try { + Object.keys(this.tokenizersCache).forEach((key) => { + if (this.tokenizersCache[key]) { + this.tokenizersCache[key].free(); + delete this.tokenizersCache[key]; + } + }); + this.tokenizerCallsCount = 1; + } catch (error) { + 
logger.error('[Tokenizer] Free and reset encoders error', error); + } + } + + resetTokenizersIfNecessary() { + if (this.tokenizerCallsCount >= 25) { + if (this.options?.debug) { + logger.debug('[Tokenizer] freeAndResetAllEncoders: reached 25 encodings, resetting...'); + } + this.freeAndResetAllEncoders(); + } + this.tokenizerCallsCount++; + } + + getTokenCount(text, encoding = 'cl100k_base') { + this.resetTokenizersIfNecessary(); + try { + const tokenizer = this.getTokenizer(encoding); + return tokenizer.encode(text, 'all').length; + } catch (error) { + this.freeAndResetAllEncoders(); + const tokenizer = this.getTokenizer(encoding); + return tokenizer.encode(text, 'all').length; + } + } +} + +const tokenizerService = new Tokenizer(); + +module.exports = tokenizerService; diff --git a/api/server/services/ToolService.js b/api/server/services/ToolService.js index 5e9b5112a0e..109fbeddea4 100644 --- a/api/server/services/ToolService.js +++ b/api/server/services/ToolService.js @@ -1,6 +1,7 @@ const fs = require('fs'); const path = require('path'); const { StructuredTool } = require('langchain/tools'); +const { tool: toolFn } = require('@langchain/core/tools'); const { zodToJsonSchema } = require('zod-to-json-schema'); const { Calculator } = require('langchain/tools/calculator'); const { @@ -180,7 +181,7 @@ async function processRequiredActions(client, requiredActions) { const tools = requiredActions.map((action) => action.tool); const loadedTools = await loadTools({ user: client.req.user.id, - model: client.req.body.model ?? 'gpt-3.5-turbo-1106', + model: client.req.body.model ?? 'gpt-4o-mini', tools, functions: true, options: { @@ -372,8 +373,120 @@ async function processRequiredActions(client, requiredActions) { }; } +/** + * Processes the runtime tool calls and returns a combined toolMap. + * @param {Object} params - Run params containing user and request information. + * @param {ServerRequest} params.req - The request object. 
+ * @param {string} params.agent_id - The agent ID. + * @param {string[]} params.tools - The agent's available tools. + * @param {string | undefined} [params.openAIApiKey] - The OpenAI API key. + * @returns {Promise<{ tools?: StructuredTool[]; toolMap?: Record}>} The combined toolMap. + */ +async function loadAgentTools({ req, agent_id, tools, openAIApiKey }) { + if (!tools || tools.length === 0) { + return {}; + } + const loadedTools = await loadTools({ + user: req.user.id, + // model: req.body.model ?? 'gpt-4o-mini', + tools, + functions: true, + options: { + req, + openAIApiKey, + returnMetadata: true, + processFileURL, + uploadImageBuffer, + fileStrategy: req.app.locals.fileStrategy, + }, + skipSpecs: true, + }); + + const agentTools = []; + for (let i = 0; i < loadedTools.length; i++) { + const tool = loadedTools[i]; + + const toolInstance = toolFn( + async (...args) => { + return tool['_call'](...args); + }, + { + name: tool.name, + description: tool.description, + schema: tool.schema, + }, + ); + + agentTools.push(toolInstance); + } + + const ToolMap = loadedTools.reduce((map, tool) => { + map[tool.name] = tool; + return map; + }, {}); + + let actionSets = []; + const ActionToolMap = {}; + + for (const toolName of tools) { + if (!ToolMap[toolName]) { + if (!actionSets.length) { + actionSets = (await loadActionSets({ agent_id })) ?? 
[]; + } + + let actionSet = null; + let currentDomain = ''; + for (let action of actionSets) { + const domain = await domainParser(req, action.metadata.domain, true); + if (toolName.includes(domain)) { + currentDomain = domain; + actionSet = action; + break; + } + } + + if (actionSet) { + const validationResult = validateAndParseOpenAPISpec(actionSet.metadata.raw_spec); + if (validationResult.spec) { + const { requestBuilders, functionSignatures, zodSchemas } = openapiToFunction( + validationResult.spec, + true, + ); + const functionName = toolName.replace(`${actionDelimiter}${currentDomain}`, ''); + const functionSig = functionSignatures.find((sig) => sig.name === functionName); + const requestBuilder = requestBuilders[functionName]; + const zodSchema = zodSchemas[functionName]; + + if (requestBuilder) { + const tool = await createActionTool({ + action: actionSet, + requestBuilder, + zodSchema, + name: toolName, + description: functionSig.description, + }); + agentTools.push(tool); + ActionToolMap[toolName] = tool; + } + } + } + } + } + + if (tools.length > 0 && agentTools.length === 0) { + throw new Error('No tools found for the specified tool calls.'); + } + + const toolMap = { ...ToolMap, ...ActionToolMap }; + return { + tools: agentTools, + toolMap, + }; +} + module.exports = { - formatToOpenAIAssistantTool, + loadAgentTools, loadAndFormatTools, processRequiredActions, + formatToOpenAIAssistantTool, }; diff --git a/api/typedefs.js b/api/typedefs.js index f69f7b1843a..6591d192b12 100644 --- a/api/typedefs.js +++ b/api/typedefs.js @@ -8,6 +8,36 @@ * @memberof typedefs */ +/** + * @exports ServerRequest + * @typedef {import('express').Request} ServerRequest + * @memberof typedefs + */ + +/** + * @exports ServerResponse + * @typedef {import('express').Response} ServerResponse + * @memberof typedefs + */ + +/** + * @exports ClientCallbacks + * @typedef {import('@librechat/agents').ClientCallbacks} ClientCallbacks + * @memberof typedefs + */ + +/** + * @exports 
StreamEventData + * @typedef {import('@librechat/agents').StreamEventData} StreamEventData + * @memberof typedefs + */ + +/** + * @exports ToolEndData + * @typedef {import('@librechat/agents').ToolEndData} ToolEndData + * @memberof typedefs + */ + /** * @exports Ollama * @typedef {import('ollama').Ollama} Ollama @@ -724,6 +754,36 @@ * @memberof typedefs */ +/** + * @exports Agent + * @typedef {import('librechat-data-provider').Agent} Agent + * @memberof typedefs + */ + +/** + * @exports AgentCreateParams + * @typedef {import('librechat-data-provider').AgentCreateParams} AgentCreateParams + * @memberof typedefs + */ + +/** + * @exports AgentUpdateParams + * @typedef {import('librechat-data-provider').AgentUpdateParams} AgentUpdateParams + * @memberof typedefs + */ + +/** + * @exports AgentListParams + * @typedef {import('librechat-data-provider').AgentListParams} AgentListParams + * @memberof typedefs + */ + +/** + * @exports AgentListResponse + * @typedef {import('librechat-data-provider').AgentListResponse} AgentListResponse + * @memberof typedefs + */ + /** * Represents details of the message creation by the run step, including the ID of the created message. 
* diff --git a/api/utils/tokens.js b/api/utils/tokens.js index f236fa8f4d8..83246c5b74d 100644 --- a/api/utils/tokens.js +++ b/api/utils/tokens.js @@ -68,6 +68,7 @@ const aggregateModels = { ...openAIModels, ...googleModels, ...anthropicModels, const maxTokensMap = { [EModelEndpoint.azureOpenAI]: openAIModels, [EModelEndpoint.openAI]: aggregateModels, + [EModelEndpoint.agents]: aggregateModels, [EModelEndpoint.custom]: aggregateModels, [EModelEndpoint.google]: googleModels, [EModelEndpoint.anthropic]: anthropicModels, diff --git a/client/package.json b/client/package.json index 589d9da5c59..9ea26134598 100644 --- a/client/package.json +++ b/client/package.json @@ -135,7 +135,7 @@ "tailwindcss": "^3.4.1", "ts-jest": "^29.1.0", "typescript": "^5.0.4", - "vite": "^5.4.2", + "vite": "^5.1.1", "vite-plugin-node-polyfills": "^0.17.0", "vite-plugin-pwa": "^0.19.8" } diff --git a/client/src/Providers/AgentsContext.tsx b/client/src/Providers/AgentsContext.tsx new file mode 100644 index 00000000000..e793a3f0873 --- /dev/null +++ b/client/src/Providers/AgentsContext.tsx @@ -0,0 +1,27 @@ +import { useForm, FormProvider } from 'react-hook-form'; +import { createContext, useContext } from 'react'; +import { defaultAgentFormValues } from 'librechat-data-provider'; +import type { UseFormReturn } from 'react-hook-form'; +import type { AgentForm } from '~/common'; + +type AgentsContextType = UseFormReturn; + +export const AgentsContext = createContext({} as AgentsContextType); + +export function useAgentsContext() { + const context = useContext(AgentsContext); + + if (context === undefined) { + throw new Error('useAgentsContext must be used within an AgentsProvider'); + } + + return context; +} + +export default function AgentsProvider({ children }) { + const methods = useForm({ + defaultValues: defaultAgentFormValues, + }); + + return {children}; +} diff --git a/client/src/Providers/AgentsMapContext.tsx b/client/src/Providers/AgentsMapContext.tsx new file mode 100644 index 
00000000000..904d74754d6 --- /dev/null +++ b/client/src/Providers/AgentsMapContext.tsx @@ -0,0 +1,6 @@ +import { createContext, useContext } from 'react'; +import useAgentsMap from '~/hooks/Agents/useAgentsMap'; +type AgentsMapContextType = ReturnType; + +export const AgentsMapContext = createContext({} as AgentsMapContextType); +export const useAgentsMapContext = () => useContext(AgentsMapContext); diff --git a/client/src/Providers/index.ts b/client/src/Providers/index.ts index 3ac7e1ba05b..be9036a51ca 100644 --- a/client/src/Providers/index.ts +++ b/client/src/Providers/index.ts @@ -1,5 +1,6 @@ export { default as ToastProvider } from './ToastContext'; export { default as AssistantsProvider } from './AssistantsContext'; +export { default as AgentsProvider } from './AgentsContext'; export * from './ChatContext'; export * from './ShareContext'; export * from './ToastContext'; @@ -10,5 +11,7 @@ export * from './ChatFormContext'; export * from './BookmarkContext'; export * from './DashboardContext'; export * from './AssistantsContext'; +export * from './AgentsContext'; export * from './AssistantsMapContext'; export * from './AnnouncerContext'; +export * from './AgentsMapContext'; diff --git a/client/src/common/agents-types.ts b/client/src/common/agents-types.ts new file mode 100644 index 00000000000..eaf64f4c6cb --- /dev/null +++ b/client/src/common/agents-types.ts @@ -0,0 +1,27 @@ +import { Capabilities } from 'librechat-data-provider'; +import type { Agent, AgentProvider, AgentModelParameters } from 'librechat-data-provider'; +import type { Option, ExtendedFile } from './types'; + +export type TAgentOption = Option & + Agent & { + files?: Array<[string, ExtendedFile]>; + code_files?: Array<[string, ExtendedFile]>; + }; + +export type AgentCapabilities = { + [Capabilities.code_interpreter]: boolean; + [Capabilities.image_vision]: boolean; + [Capabilities.retrieval]: boolean; +}; + +export type AgentForm = { + agent?: TAgentOption; + id: string; + name: string | 
null; + description: string | null; + instructions: string | null; + model: string | null; + model_parameters: AgentModelParameters; + tools?: string[]; + provider?: AgentProvider | Option; +} & AgentCapabilities; diff --git a/client/src/common/assistants-types.ts b/client/src/common/assistants-types.ts index a4c077d96b7..4e6f4d23b26 100644 --- a/client/src/common/assistants-types.ts +++ b/client/src/common/assistants-types.ts @@ -1,7 +1,9 @@ -import { Capabilities } from 'librechat-data-provider'; -import type { Assistant } from 'librechat-data-provider'; +import { Capabilities, EModelEndpoint } from 'librechat-data-provider'; +import type { Assistant, AssistantsEndpoint } from 'librechat-data-provider'; import type { Option, ExtendedFile } from './types'; +export type ActionsEndpoint = AssistantsEndpoint | EModelEndpoint.agents; + export type TAssistantOption = | string | (Option & diff --git a/client/src/common/index.ts b/client/src/common/index.ts index 2d56ecef517..29739c7bd8f 100644 --- a/client/src/common/index.ts +++ b/client/src/common/index.ts @@ -1,3 +1,4 @@ export * from './artifacts'; export * from './types'; export * from './assistants-types'; +export * from './agents-types'; diff --git a/client/src/common/types.ts b/client/src/common/types.ts index 33d24b5fb75..bf003fbf68e 100644 --- a/client/src/common/types.ts +++ b/client/src/common/types.ts @@ -6,6 +6,7 @@ import type { SetterOrUpdater } from 'recoil'; import type { TRole, TUser, + Agent, Action, TPreset, TPlugin, @@ -18,7 +19,9 @@ import type { TConversation, TStartupConfig, EModelEndpoint, + TEndpointsConfig, ActionMetadata, + AssistantDocument, AssistantsEndpoint, TMessageContentParts, AuthorizationTypeEnum, @@ -66,6 +69,12 @@ export type AssistantListItem = { model: string; }; +export type AgentListItem = { + id: string; + name: string; + avatar: Agent['avatar']; +}; + export type TPluginMap = Record; export type GenericSetter = (value: T | ((currentValue: T) => T)) => void; @@ -92,10 +101,13 
@@ export type IconMapProps = { context?: 'landing' | 'menu-item' | 'nav' | 'message'; endpoint?: string | null; assistantName?: string; + agentName?: string; avatar?: string; size?: number; }; +export type AgentIconMapProps = IconMapProps & { agentName: string }; + export type NavLink = { title: string; label?: string; @@ -124,6 +136,7 @@ export interface DataColumnMeta { export enum Panel { builder = 'builder', actions = 'actions', + model = 'model', } export type FileSetter = @@ -159,11 +172,30 @@ export type AssistantPanelProps = { activePanel?: string; endpoint: AssistantsEndpoint; version: number | string; + documentsMap: Map | null; setAction: React.Dispatch>; setCurrentAssistantId: React.Dispatch>; setActivePanel: React.Dispatch>; }; +export type AgentPanelProps = { + index?: number; + agent_id?: string; + activePanel?: string; + action?: Action; + actions?: Action[]; + setActivePanel: React.Dispatch>; + setAction: React.Dispatch>; + endpointsConfig?: TEndpointsConfig; + setCurrentAgentId: React.Dispatch>; +}; + +export type AgentModelPanelProps = { + setActivePanel: React.Dispatch>; + providers: Option[]; + models: Record; +}; + export type AugmentedColumnDef = ColumnDef & DataColumnMeta; export type TSetOption = SetOption; @@ -385,6 +417,7 @@ export type IconProps = Pick & endpoint?: EModelEndpoint | string | null; endpointType?: EModelEndpoint | null; assistantName?: string; + agentName?: string; error?: boolean; }; @@ -407,6 +440,7 @@ export type TMessageAudio = { }; export type OptionWithIcon = Option & { icon?: React.ReactNode }; +export type DropdownValueSetter = (value: string | Option | OptionWithIcon) => void; export type MentionOption = OptionWithIcon & { type: string; value: string; diff --git a/client/src/components/Chat/Input/CircleRender.tsx b/client/src/components/Chat/Input/CircleRender.tsx new file mode 100644 index 00000000000..88b79476330 --- /dev/null +++ b/client/src/components/Chat/Input/CircleRender.tsx @@ -0,0 +1,36 @@ +import React 
from 'react'; +import { CircleIcon, CircleDotsIcon } from '~/components/svg'; +import { ECallState } from 'librechat-data-provider'; + +const CircleRender = ({ rmsLevel, isCameraOn, state }) => { + const getIconComponent = (state) => { + switch (state) { + case ECallState.Thinking: + return ; + default: + return ( +
+ +
+ ); + } + }; + + const baseScale = isCameraOn ? 0.5 : 1; + const scaleMultiplier = + rmsLevel > 0.08 + ? 1.8 + : rmsLevel > 0.07 + ? 1.6 + : rmsLevel > 0.05 + ? 1.4 + : rmsLevel > 0.01 + ? 1.2 + : 1; + + const transformScale = baseScale * scaleMultiplier; + + return getIconComponent(state); +}; + +export default CircleRender; diff --git a/client/src/components/Chat/Input/Files/FileRow.tsx b/client/src/components/Chat/Input/Files/FileRow.tsx index e7887a7259c..80bf24b10b4 100644 --- a/client/src/components/Chat/Input/Files/FileRow.tsx +++ b/client/src/components/Chat/Input/Files/FileRow.tsx @@ -11,6 +11,8 @@ export default function FileRow({ setFiles, setFilesLoading, assistant_id, + // TODO: Agent file handling + agent_id, tool_resource, fileFilter, isRTL, @@ -21,6 +23,7 @@ export default function FileRow({ setFilesLoading: React.Dispatch>; fileFilter?: (file: ExtendedFile) => boolean; assistant_id?: string; + agent_id?: string; tool_resource?: EToolResources; isRTL?: boolean; Wrapper?: React.FC<{ children: React.ReactNode }>; diff --git a/client/src/components/Chat/Menus/Endpoints/Icons.tsx b/client/src/components/Chat/Menus/Endpoints/Icons.tsx index 4e88cceef47..913fc5b3f80 100644 --- a/client/src/components/Chat/Menus/Endpoints/Icons.tsx +++ b/client/src/components/Chat/Menus/Endpoints/Icons.tsx @@ -1,5 +1,6 @@ import { EModelEndpoint } from 'librechat-data-provider'; -import type { IconMapProps } from '~/common'; +import type { IconMapProps, AgentIconMapProps } from '~/common'; +import { BrainCircuit } from 'lucide-react'; import { MinimalPlugin, GPTIcon, @@ -33,6 +34,24 @@ const AssistantAvatar = ({ className = '', assistantName, avatar, size }: IconMa return ; }; +const AgentAvatar = ({ className = '', agentName, avatar, size }: AgentIconMapProps) => { + if (agentName && avatar) { + return ( + {agentName} + ); + } else if (agentName) { + return ; + } + + return ; +}; + export const icons = { [EModelEndpoint.azureOpenAI]: AzureMinimalIcon, 
[EModelEndpoint.openAI]: GPTIcon, @@ -44,5 +63,6 @@ export const icons = { [EModelEndpoint.custom]: CustomMinimalIcon, [EModelEndpoint.assistants]: AssistantAvatar, [EModelEndpoint.azureAssistants]: AssistantAvatar, + [EModelEndpoint.agents]: AgentAvatar, unknown: UnknownIcon, }; diff --git a/client/src/components/Chat/Messages/Content/ContentParts.tsx b/client/src/components/Chat/Messages/Content/ContentParts.tsx index 4a8a3231980..3227bce0723 100644 --- a/client/src/components/Chat/Messages/Content/ContentParts.tsx +++ b/client/src/components/Chat/Messages/Content/ContentParts.tsx @@ -28,7 +28,7 @@ any) => { return ( { @@ -31,6 +29,10 @@ const DisplayMessage = ({ text, isCreatedByUser = false, message, showCursor }: () => message.messageId === latestMessage?.messageId, [message.messageId, latestMessage?.messageId], ); + + // Note: for testing purposes + // isSubmitting && isLatestMessage && logger.log('message_stream', { text, isCreatedByUser, isSubmitting, showCursorState }); + return (
; } else if (part.type === ContentTypes.TEXT) { - // Access the value property + const text = typeof part.text === 'string' ? part.text : part.text.value; + if (typeof text !== 'string') { + return null; + } return ( ); - } else if ( - part.type === ContentTypes.TOOL_CALL && - part[ContentTypes.TOOL_CALL].type === ToolCallTypes.CODE_INTERPRETER - ) { - const toolCall = part[ContentTypes.TOOL_CALL]; - const code_interpreter = toolCall[ToolCallTypes.CODE_INTERPRETER]; - return ( - - ); - } else if ( - part.type === ContentTypes.TOOL_CALL && - (part[ContentTypes.TOOL_CALL].type === ToolCallTypes.RETRIEVAL || - part[ContentTypes.TOOL_CALL].type === ToolCallTypes.FILE_SEARCH) - ) { - const toolCall = part[ContentTypes.TOOL_CALL]; - return ; - } else if ( - part.type === ContentTypes.TOOL_CALL && - part[ContentTypes.TOOL_CALL].type === ToolCallTypes.FUNCTION && - imageGenTools.has(part[ContentTypes.TOOL_CALL].function.name) - ) { - const toolCall = part[ContentTypes.TOOL_CALL]; - return ( - - ); - } else if ( - part.type === ContentTypes.TOOL_CALL && - part[ContentTypes.TOOL_CALL].type === ToolCallTypes.FUNCTION - ) { + } else if (part.type === ContentTypes.TOOL_CALL) { const toolCall = part[ContentTypes.TOOL_CALL]; - if (isImageVisionTool(toolCall)) { - if (isSubmitting && showCursor) { - return ( - - - - ); - } + if (!toolCall) { return null; } - return ( - - ); + if ('args' in toolCall && (!toolCall.type || toolCall.type === ToolCallTypes.TOOL_CALL)) { + return ( + + ); + } else if (toolCall.type === ToolCallTypes.CODE_INTERPRETER) { + const code_interpreter = toolCall[ToolCallTypes.CODE_INTERPRETER]; + return ( + + ); + } else if ( + toolCall.type === ToolCallTypes.RETRIEVAL || + toolCall.type === ToolCallTypes.FILE_SEARCH + ) { + return ( + + ); + } else if ( + toolCall.type === ToolCallTypes.FUNCTION && + ToolCallTypes.FUNCTION in toolCall && + imageGenTools.has(toolCall.function.name) + ) { + return ( + + ); + } else if (toolCall.type === ToolCallTypes.FUNCTION && 
ToolCallTypes.FUNCTION in toolCall) { + if (isImageVisionTool(toolCall)) { + if (isSubmitting && showCursor) { + return ( + + + + ); + } + return null; + } + + return ( + + ); + } } else if (part.type === ContentTypes.IMAGE_FILE) { const imageFile = part[ContentTypes.IMAGE_FILE]; const height = imageFile.height ?? 1920; @@ -153,8 +169,6 @@ export default function Part({ height: height + 'px', width: width + 'px', }} - // n={imageFiles.length} - // i={i} /> ); } diff --git a/client/src/components/Chat/Messages/Content/ToolCall.tsx b/client/src/components/Chat/Messages/Content/ToolCall.tsx index fc1da37fbef..fce15d84a89 100644 --- a/client/src/components/Chat/Messages/Content/ToolCall.tsx +++ b/client/src/components/Chat/Messages/Content/ToolCall.tsx @@ -1,4 +1,4 @@ -// import { useState, useEffect } from 'react'; +import { useMemo } from 'react'; import { actionDelimiter, actionDomainSeparator, Constants } from 'librechat-data-provider'; import * as Popover from '@radix-ui/react-popover'; import useLocalize from '~/hooks/useLocalize'; @@ -11,18 +11,19 @@ import ToolPopover from './ToolPopover'; // import ActionIcon from './ActionIcon'; import WrenchIcon from './WrenchIcon'; import { useProgress } from '~/hooks'; +import { logger } from '~/utils'; export default function ToolCall({ initialProgress = 0.1, isSubmitting, name, - args = '', + args: _args = '', output, }: { initialProgress: number; isSubmitting: boolean; name: string; - args: string; + args: string | Record; output?: string | null; }) { const localize = useLocalize(); @@ -35,6 +36,27 @@ export default function ToolCall({ const domain = _domain?.replaceAll(actionDomainSeparator, '.') ?? 
null; const error = output?.toLowerCase()?.includes('error processing tool'); + const args = useMemo(() => { + if (typeof _args === 'string') { + return _args; + } + + try { + return JSON.stringify(_args, null, 2); + } catch (e) { + logger.error( + 'client/src/components/Chat/Messages/Content/ToolCall.tsx - Failed to stringify args', + e, + ); + return ''; + } + }, [_args]); + + const hasInfo = useMemo( + () => (args?.length || 0) > 0 || (output?.length || 0) > 0, + [args, output], + ); + return (
@@ -67,10 +89,10 @@ export default function ToolCall({ ? localize('com_assistants_completed_action', domain) : localize('com_assistants_completed_function', function_name) } - hasInput={!!args?.length} + hasInput={hasInfo} popover={true} /> - {!!args?.length && ( + {hasInfo && ( )}
diff --git a/client/src/components/Chat/Messages/MessageIcon.tsx b/client/src/components/Chat/Messages/MessageIcon.tsx index ce09b6ca269..e4c06a4a1d9 100644 --- a/client/src/components/Chat/Messages/MessageIcon.tsx +++ b/client/src/components/Chat/Messages/MessageIcon.tsx @@ -1,6 +1,6 @@ import React, { useMemo, memo } from 'react'; import { useGetEndpointsQuery } from 'librechat-data-provider/react-query'; -import type { TMessage, TPreset, Assistant } from 'librechat-data-provider'; +import type { TMessage, TPreset, Assistant, Agent } from 'librechat-data-provider'; import type { TMessageProps } from '~/common'; import ConvoIconURL from '~/components/Endpoints/ConvoIconURL'; import { getEndpointField, getIconEndpoint } from '~/utils'; @@ -10,15 +10,26 @@ const MessageIcon = memo( ( props: Pick & { assistant?: Assistant; + agent?: Agent; }, ) => { const { data: endpointsConfig } = useGetEndpointsQuery(); - const { message, conversation, assistant } = props; + const { message, conversation, assistant, agent } = props; const assistantName = useMemo(() => assistant?.name ?? '', [assistant]); const assistantAvatar = useMemo(() => assistant?.metadata?.avatar ?? '', [assistant]); + const agentName = useMemo(() => props.agent?.name ?? '', [props.agent]); + const agentAvatar = useMemo(() => props.agent?.avatar?.filepath ?? '', [props.agent]); const isCreatedByUser = useMemo(() => message?.isCreatedByUser ?? false, [message]); + let avatarURL = ''; + + if (assistant) { + avatarURL = assistantAvatar; + } else if (agent) { + avatarURL = agentAvatar; + } + const messageSettings = useMemo( () => ({ ...(conversation ?? 
{}), @@ -47,8 +58,10 @@ const MessageIcon = memo( preset={messageSettings as typeof messageSettings & TPreset} context="message" assistantAvatar={assistantAvatar} + agentAvatar={agentAvatar} endpointIconURL={endpointIconURL} assistantName={assistantName} + agentName={agentName} /> ); } @@ -57,9 +70,10 @@ const MessageIcon = memo( ); diff --git a/client/src/components/Chat/Messages/MessageParts.tsx b/client/src/components/Chat/Messages/MessageParts.tsx index af97f9512c8..37794c44f50 100644 --- a/client/src/components/Chat/Messages/MessageParts.tsx +++ b/client/src/components/Chat/Messages/MessageParts.tsx @@ -20,6 +20,7 @@ export default function Message(props: TMessageProps) { ask, edit, index, + agent, isLast, enterEdit, assistant, @@ -38,6 +39,16 @@ export default function Message(props: TMessageProps) { return null; } + let name = ''; + + if (isCreatedByUser === true) { + name = localize('com_user_message'); + } else if (assistant) { + name = assistant.name ?? localize('com_ui_assistant'); + } else if (agent) { + name = agent.name ?? localize('com_ui_agent'); + } + return ( <>
- +
@@ -62,11 +78,7 @@ export default function Message(props: TMessageProps) { isCreatedByUser === true ? '' : 'agent-turn', )} > -
- {isCreatedByUser === true - ? localize('com_user_message') - : (assistant && assistant.name) ?? localize('com_ui_assistant')} -
+
{name}
= ({ endpointIconURL, assistantAvatar, assistantName, + agentAvatar, + agentName, context, }) => { const { iconURL = '' } = preset ?? {}; @@ -71,7 +75,8 @@ const ConvoIconURL: React.FC = ({ className="h-2/3 w-2/3" iconURL={endpointIconURL} assistantName={assistantName} - avatar={assistantAvatar} + avatar={assistantAvatar || agentAvatar} + agentName={agentName} />
); diff --git a/client/src/components/Endpoints/MessageEndpointIcon.tsx b/client/src/components/Endpoints/MessageEndpointIcon.tsx index 98a884f4417..fc084bbb55b 100644 --- a/client/src/components/Endpoints/MessageEndpointIcon.tsx +++ b/client/src/components/Endpoints/MessageEndpointIcon.tsx @@ -1,5 +1,6 @@ import { EModelEndpoint, isAssistantsEndpoint } from 'librechat-data-provider'; import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon'; +import { BrainCircuit } from 'lucide-react'; import { Plugin, GPTIcon, @@ -25,6 +26,7 @@ const MessageEndpointIcon: React.FC = (props) => { size = 30, model = '', assistantName, + agentName, } = props; const assistantsIcon = { @@ -56,8 +58,38 @@ const MessageEndpointIcon: React.FC = (props) => { name: endpoint, }; + const agentsIcon = { + icon: props.iconURL ? ( +
+
+ {agentName} +
+
+ ) : ( +
+
+ +
+
+ ), + name: endpoint, + }; + const endpointIcons = { [EModelEndpoint.assistants]: assistantsIcon, + [EModelEndpoint.agents]: agentsIcon, [EModelEndpoint.azureAssistants]: assistantsIcon, [EModelEndpoint.azureOpenAI]: { icon: , diff --git a/client/src/components/Endpoints/MinimalIcon.tsx b/client/src/components/Endpoints/MinimalIcon.tsx index 1bb200ced3b..80ab657a83a 100644 --- a/client/src/components/Endpoints/MinimalIcon.tsx +++ b/client/src/components/Endpoints/MinimalIcon.tsx @@ -1,4 +1,5 @@ import { EModelEndpoint } from 'librechat-data-provider'; +import { BrainCircuit } from 'lucide-react'; import UnknownIcon from '~/components/Chat/Menus/Endpoints/UnknownIcon'; import { AzureMinimalIcon, @@ -46,6 +47,7 @@ const MinimalIcon: React.FC = (props) => { [EModelEndpoint.chatGPTBrowser]: { icon: , name: 'ChatGPT' }, [EModelEndpoint.assistants]: { icon: , name: 'Assistant' }, [EModelEndpoint.azureAssistants]: { icon: , name: 'Assistant' }, + [EModelEndpoint.agents]: { icon: , name: 'Agent' }, default: { icon: ( } = { [EModelEndpoint.assistants]: AssistantsSettings, [EModelEndpoint.azureAssistants]: AssistantsSettings, + [EModelEndpoint.agents]: OpenAISettings, [EModelEndpoint.openAI]: OpenAISettings, [EModelEndpoint.custom]: OpenAISettings, [EModelEndpoint.azureOpenAI]: OpenAISettings, diff --git a/client/src/components/Prompts/Groups/CategorySelector.tsx b/client/src/components/Prompts/Groups/CategorySelector.tsx index 79be1347e22..61c30c6c76b 100644 --- a/client/src/components/Prompts/Groups/CategorySelector.tsx +++ b/client/src/components/Prompts/Groups/CategorySelector.tsx @@ -2,8 +2,8 @@ import React, { useMemo } from 'react'; import { useFormContext, Controller } from 'react-hook-form'; import { LocalStorageKeys } from 'librechat-data-provider'; import { useLocalize, useCategories } from '~/hooks'; +import { cn, createDropdownSetter } from '~/utils'; import { SelectDropDown } from '~/components/ui'; -import { cn } from '~/utils'; const CategorySelector = ({ 
currentCategory, @@ -37,11 +37,11 @@ const CategorySelector = ({ title="Category" tabIndex={tabIndex} value={categoryOption || ''} - setValue={(value) => { + setValue={createDropdownSetter((value: string) => { setValue('category', value, { shouldDirty: false }); localStorage.setItem(LocalStorageKeys.LAST_PROMPT_CATEGORY, value); onValueChange?.(value); - }} + })} availableValues={categories} showAbove={false} showLabel={false} diff --git a/client/src/components/Share/MessageIcon.tsx b/client/src/components/Share/MessageIcon.tsx index 00beb84a686..e4c1b9dca86 100644 --- a/client/src/components/Share/MessageIcon.tsx +++ b/client/src/components/Share/MessageIcon.tsx @@ -1,5 +1,5 @@ import { useMemo } from 'react'; -import type { TMessage, TPreset, Assistant } from 'librechat-data-provider'; +import type { TMessage, TPreset, Assistant, Agent } from 'librechat-data-provider'; import type { TMessageProps } from '~/common'; import MessageEndpointIcon from '../Endpoints/MessageEndpointIcon'; import ConvoIconURL from '~/components/Endpoints/ConvoIconURL'; @@ -9,12 +9,15 @@ import { UserIcon } from '../svg'; export default function MessageIcon( props: Pick & { assistant?: false | Assistant; + agent?: false | Agent; }, ) { - const { message, conversation, assistant } = props; + const { message, conversation, assistant, agent } = props; const assistantName = assistant ? (assistant.name as string | undefined) : ''; const assistantAvatar = assistant ? (assistant.metadata?.avatar as string | undefined) : ''; + const agentName = agent ? (agent.name as string | undefined) : ''; + const agentAvatar = agent ? (agent.metadata?.avatar as string | undefined) : ''; const messageSettings = useMemo( () => ({ @@ -38,6 +41,8 @@ export default function MessageIcon( context="message" assistantAvatar={assistantAvatar} assistantName={assistantName} + agentAvatar={agentAvatar} + agentName={agentName} /> ); } @@ -65,6 +70,7 @@ export default function MessageIcon( iconURL={!assistant ? 
undefined : assistantAvatar} model={message?.model ?? conversation?.model} assistantName={assistantName} + agentName={agentName} size={28.8} /> ); diff --git a/client/src/components/SidePanel/AgentSwitcher.tsx b/client/src/components/SidePanel/AgentSwitcher.tsx new file mode 100644 index 00000000000..514e3939fe1 --- /dev/null +++ b/client/src/components/SidePanel/AgentSwitcher.tsx @@ -0,0 +1,87 @@ +import { useEffect, useMemo } from 'react'; +import { EModelEndpoint, isAgentsEndpoint, LocalStorageKeys } from 'librechat-data-provider'; +import type { Agent } from 'librechat-data-provider'; +import type { SwitcherProps, OptionWithIcon } from '~/common'; +import { useSetIndexOptions, useSelectAgent, useLocalize } from '~/hooks'; +import { useChatContext, useAgentsMapContext } from '~/Providers'; +import ControlCombobox from '~/components/ui/ControlCombobox'; +import Icon from '~/components/Endpoints/Icon'; + +export default function AgentSwitcher({ isCollapsed }: SwitcherProps) { + const localize = useLocalize(); + const { setOption } = useSetIndexOptions(); + const { index, conversation } = useChatContext(); + const { agent_id: selectedAgentId = null, endpoint } = conversation ?? {}; + + const agentsMapResult = useAgentsMapContext(); + + const agentsMap = useMemo(() => { + return agentsMapResult ?? 
{}; + }, [agentsMapResult]); + + const { onSelect } = useSelectAgent(); + + const agents: Agent[] = useMemo(() => { + return Object.values(agentsMap) as Agent[]; + }, [agentsMap]); + + useEffect(() => { + if (selectedAgentId == null && agents.length > 0) { + let agent_id = localStorage.getItem(`${LocalStorageKeys.AGENT_ID_PREFIX}${index}`); + if (agent_id == null) { + agent_id = agents[0].id; + } + const agent = agentsMap[agent_id]; + + if (agent !== undefined && isAgentsEndpoint(endpoint as string) === true) { + setOption('model')(''); + setOption('agent_id')(agent_id); + } + } + }, [index, agents, selectedAgentId, agentsMap, endpoint, setOption]); + + const currentAgent = agentsMap[selectedAgentId ?? '']; + + const agentOptions: OptionWithIcon[] = useMemo( + () => + agents.map((agent: Agent) => { + return { + label: agent.name ?? '', + value: agent.id, + icon: ( + + ), + }; + }), + [agents], + ); + + return ( + agent.id === selectedAgentId)?.name ?? + localize('com_sidepanel_select_agent') + } + selectPlaceholder={localize('com_sidepanel_select_agent')} + searchPlaceholder={localize('com_agents_search_name')} + isCollapsed={isCollapsed} + ariaLabel={'agent'} + setValue={onSelect} + items={agentOptions} + SelectIcon={ + + } + /> + ); +} diff --git a/client/src/components/SidePanel/Agents/ActionsAuth.tsx b/client/src/components/SidePanel/Agents/ActionsAuth.tsx new file mode 100644 index 00000000000..73106855639 --- /dev/null +++ b/client/src/components/SidePanel/Agents/ActionsAuth.tsx @@ -0,0 +1,296 @@ +import { useFormContext } from 'react-hook-form'; +import * as RadioGroup from '@radix-ui/react-radio-group'; +import * as DialogPrimitive from '@radix-ui/react-dialog'; +import { + AuthTypeEnum, + AuthorizationTypeEnum, + TokenExchangeMethodEnum, +} from 'librechat-data-provider'; +import { DialogContent } from '~/components/ui/'; + +export default function ActionsAuth({ + setOpenAuthDialog, +}: { + setOpenAuthDialog: React.Dispatch>; +}) { + const { watch, 
setValue, trigger } = useFormContext(); + const type = watch('type'); + return ( + +
+
+
+
+

+ Authentication +

+
+
+
+
+
+
+ + setValue('type', value)} + value={type} + role="radiogroup" + aria-required="false" + dir="ltr" + className="flex gap-4" + tabIndex={0} + style={{ outline: 'none' }} + > +
+ +
+
+ +
+
+ +
+
+
+ {type === 'none' ? null : type === 'service_http' ? : } + {/* Cancel/Save */} +
+ + +
Cancel
+
+
+
+
+ ); +} + +const ApiKey = () => { + const { register, watch, setValue } = useFormContext(); + const authorization_type = watch('authorization_type'); + const type = watch('type'); + return ( + <> + + + + setValue('authorization_type', value)} + value={authorization_type} + role="radiogroup" + aria-required="true" + dir="ltr" + className="mb-2 flex gap-6 overflow-hidden rounded-lg" + tabIndex={0} + style={{ outline: 'none' }} + > +
+ +
+
+ +
+
+ +
+
+ {authorization_type === AuthorizationTypeEnum.Custom && ( +
+ + +
+ )} + + ); +}; + +const OAuth = () => { + const { register, watch, setValue } = useFormContext(); + const token_exchange_method = watch('token_exchange_method'); + const type = watch('type'); + return ( + <> + + + + + + + + + + + + setValue('token_exchange_method', value)} + value={token_exchange_method} + role="radiogroup" + aria-required="true" + dir="ltr" + tabIndex={0} + style={{ outline: 'none' }} + > +
+ +
+
+ +
+
+ + ); +}; diff --git a/client/src/components/SidePanel/Agents/ActionsInput.tsx b/client/src/components/SidePanel/Agents/ActionsInput.tsx new file mode 100644 index 00000000000..f1bc3aecd10 --- /dev/null +++ b/client/src/components/SidePanel/Agents/ActionsInput.tsx @@ -0,0 +1,285 @@ +import debounce from 'lodash/debounce'; +import { useState, useEffect } from 'react'; +import { useFormContext } from 'react-hook-form'; +import { + validateAndParseOpenAPISpec, + openapiToFunction, + AuthTypeEnum, +} from 'librechat-data-provider'; +import type { + Action, + FunctionTool, + ActionMetadata, + ValidationResult, +} from 'librechat-data-provider'; +import type { ActionAuthForm } from '~/common'; +import type { Spec } from './ActionsTable'; +import { ActionsTable, columns } from './ActionsTable'; +import { useUpdateAgentAction } from '~/data-provider'; +import { cn, removeFocusOutlines } from '~/utils'; +import { useToastContext } from '~/Providers'; +import useLocalize from '~/hooks/useLocalize'; +import { Spinner } from '~/components/svg'; + +const debouncedValidation = debounce( + (input: string, callback: (result: ValidationResult) => void) => { + const result = validateAndParseOpenAPISpec(input); + callback(result); + }, + 800, +); + +export default function ActionsInput({ + action, + agent_id, + setAction, +}: { + action?: Action; + agent_id?: string; + setAction: React.Dispatch>; +}) { + const handleResult = (result: ValidationResult) => { + if (!result.status) { + setData(null); + setFunctions(null); + } + setValidationResult(result); + }; + + const localize = useLocalize(); + const { showToast } = useToastContext(); + const { handleSubmit, reset } = useFormContext(); + const [validationResult, setValidationResult] = useState(null); + const [inputValue, setInputValue] = useState(''); + + const [data, setData] = useState(null); + const [functions, setFunctions] = useState(null); + + useEffect(() => { + if (!action?.metadata?.raw_spec) { + return; + } + 
setInputValue(action.metadata.raw_spec); + debouncedValidation(action.metadata.raw_spec, handleResult); + }, [action?.metadata?.raw_spec]); + + useEffect(() => { + if (!validationResult || !validationResult.status || !validationResult.spec) { + return; + } + + const { functionSignatures, requestBuilders } = openapiToFunction(validationResult.spec); + const specs = Object.entries(requestBuilders).map(([name, props]) => { + return { + name, + method: props.method, + path: props.path, + domain: props.domain, + }; + }); + + setData(specs); + setValidationResult(null); + setFunctions(functionSignatures.map((f) => f.toObjectTool())); + }, [validationResult]); + + const updateAgentAction = useUpdateAgentAction({ + onSuccess(data) { + showToast({ + message: localize('com_assistants_update_actions_success'), + status: 'success', + }); + reset(); + setAction(data[1]); + }, + onError(error) { + showToast({ + message: (error as Error)?.message ?? localize('com_assistants_update_actions_error'), + status: 'error', + }); + }, + }); + + const saveAction = handleSubmit((authFormData) => { + console.log('authFormData', authFormData); + if (!agent_id) { + // alert user? + return; + } + + if (!functions) { + return; + } + + if (!data) { + return; + } + + let { metadata = {} } = action ?? {}; + const action_id = action?.action_id; + metadata.raw_spec = inputValue; + const parsedUrl = new URL(data[0].domain); + const domain = parsedUrl.hostname; + if (!domain) { + // alert user? 
+ return; + } + metadata.domain = domain; + + const { type, saved_auth_fields } = authFormData; + + const removeSensitiveFields = (obj: ActionMetadata) => { + delete obj.auth; + delete obj.api_key; + delete obj.oauth_client_id; + delete obj.oauth_client_secret; + }; + + if (saved_auth_fields && type === AuthTypeEnum.ServiceHttp) { + metadata = { + ...metadata, + api_key: authFormData.api_key, + auth: { + type, + authorization_type: authFormData.authorization_type, + custom_auth_header: authFormData.custom_auth_header, + }, + }; + } else if (saved_auth_fields && type === AuthTypeEnum.OAuth) { + metadata = { + ...metadata, + auth: { + type, + authorization_url: authFormData.authorization_url, + client_url: authFormData.client_url, + scope: authFormData.scope, + token_exchange_method: authFormData.token_exchange_method, + }, + oauth_client_id: authFormData.oauth_client_id, + oauth_client_secret: authFormData.oauth_client_secret, + }; + } else if (saved_auth_fields) { + removeSensitiveFields(metadata); + metadata.auth = { + type, + }; + } else { + removeSensitiveFields(metadata); + } + + updateAgentAction.mutate({ + action_id, + metadata, + functions, + agent_id, + }); + }); + + const handleInputChange: React.ChangeEventHandler = (event) => { + const newValue = event.target.value; + setInputValue(newValue); + if (!newValue) { + setData(null); + setFunctions(null); + return setValidationResult(null); + } + debouncedValidation(newValue, handleResult); + }; + + return ( + <> +
+
+ +
+ {/* */} + +
+
+
+
+