From 488170068c0e4c2779f15a6a7746464dc28f45b2 Mon Sep 17 00:00:00 2001 From: LzSkyline Date: Tue, 30 Jul 2024 16:53:08 +0800 Subject: [PATCH] Add Ollama native API to support keep alive parameters (#748) * Add Chinese translation. * Optimize style. * Add Ollama native API to support keep alive parameters. * Optimized popup page style. * fix: Fixed data type for Ollama keep_alive parameter forever --- src/_locales/zh-hans/main.json | 11 ++- src/background/index.mjs | 10 +++ src/components/ConversationItem/index.jsx | 2 +- src/config/index.mjs | 11 +++ src/content-script/index.jsx | 2 +- src/manifest.json | 2 +- src/manifest.v2.json | 2 +- src/popup/sections/AdvancedPart.jsx | 45 +++++++++++++ src/popup/sections/GeneralPart.jsx | 36 ++++++++++ src/popup/styles.scss | 15 +++++ src/services/apis/ollama-api.mjs | 82 +++++++++++++++++++++++ src/utils/fetch-ollama.mjs | 27 ++++++++ src/utils/fetch-sse.mjs | 1 + 13 files changed, 241 insertions(+), 5 deletions(-) create mode 100644 src/services/apis/ollama-api.mjs create mode 100644 src/utils/fetch-ollama.mjs diff --git a/src/_locales/zh-hans/main.json b/src/_locales/zh-hans/main.json index 45db68af..0a1282d1 100644 --- a/src/_locales/zh-hans/main.json +++ b/src/_locales/zh-hans/main.json @@ -77,6 +77,7 @@ "ChatGPT (GPT-4-8k)": "ChatGPT (GPT-4-8k)", "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", + "Ollama API": "Ollama API", "Custom Model": "自定义模型", "Balanced": "平衡", "Creative": "有创造力", @@ -142,5 +143,13 @@ "Icon": "图标", "Prompt Template": "提示模板", "Explain this: {{selection}}": "解释这个: {{selection}}", - "New": "新建" + "New": "新建", + "DisplayMode": "显示方式", + "Display in sidebar": "在侧边栏显示", + "Display in floating toolbar": "在浮动工具栏显示", + "Temperature": "温度", + "keep-alive Time": "保活时间", + "5m": "5分钟", + "30m": "半小时", + "Forever": "永久" } diff --git a/src/background/index.mjs b/src/background/index.mjs index 3dde5695..efbd928e 100644 --- a/src/background/index.mjs +++ b/src/background/index.mjs @@ -10,6 +10,7 
@@ import { generateAnswersWithGptCompletionApi, } from '../services/apis/openai-api' import { generateAnswersWithCustomApi } from '../services/apis/custom-api.mjs' +import { generateAnswersWithOllamaApi } from '../services/apis/ollama-api.mjs' import { generateAnswersWithAzureOpenaiApi } from '../services/apis/azure-openai-api.mjs' import { generateAnswersWithClaudeApi } from '../services/apis/claude-api.mjs' import { generateAnswersWithChatGLMApi } from '../services/apis/chatglm-api.mjs' @@ -25,6 +26,7 @@ import { claudeWebModelKeys, moonshotWebModelKeys, customApiModelKeys, + ollamaApiModelKeys, defaultConfig, getUserConfig, githubThirdPartyApiModelKeys, @@ -124,6 +126,14 @@ async function executeApi(session, port, config) { config.customApiKey, config.customModelName, ) + } else if (ollamaApiModelKeys.includes(session.modelName)) { + await generateAnswersWithOllamaApi( + port, + session.question, + session, + config.ollamaApiKey, + config.ollamaModelName, + ) } else if (azureOpenAiApiModelKeys.includes(session.modelName)) { await generateAnswersWithAzureOpenaiApi(port, session.question, session) } else if (claudeApiModelKeys.includes(session.modelName)) { diff --git a/src/components/ConversationItem/index.jsx b/src/components/ConversationItem/index.jsx index 44a5ec27..40792763 100644 --- a/src/components/ConversationItem/index.jsx +++ b/src/components/ConversationItem/index.jsx @@ -5,7 +5,7 @@ import ReadButton from '../ReadButton' import PropTypes from 'prop-types' import MarkdownRender from '../MarkdownRender/markdown.jsx' import { useTranslation } from 'react-i18next' -import { isUsingCustomModel } from '../../config/index.mjs' +import { isUsingCustomModel, isUsingOllamaModel } from '../../config/index.mjs' import { useConfig } from '../../hooks/use-config.mjs' function AnswerTitle({ descName, modelName }) { diff --git a/src/config/index.mjs b/src/config/index.mjs index 36788063..b595f283 100644 --- a/src/config/index.mjs +++ b/src/config/index.mjs @@ -57,6 
+57,7 @@ export const chatgptApiModelKeys = [ 'chatgptApi4_128k_0125_preview', ] export const customApiModelKeys = ['customModel'] +export const ollamaApiModelKeys = ['ollamaModel'] export const azureOpenAiApiModelKeys = ['azureOpenAi'] export const claudeApiModelKeys = [ 'claude12Api', @@ -163,6 +164,7 @@ export const Models = { gptApiDavinci: { value: 'text-davinci-003', desc: 'GPT-3.5' }, customModel: { value: '', desc: 'Custom Model' }, + ollamaModel: { value: '', desc: 'Ollama API' }, azureOpenAi: { value: '', desc: 'ChatGPT (Azure)' }, waylaidwandererApi: { value: '', desc: 'Waylaidwanderer API (Github)' }, @@ -249,6 +251,10 @@ export const defaultConfig = { customModelName: 'gpt-3.5-turbo', githubThirdPartyUrl: 'http://127.0.0.1:3000/conversation', + ollamaEndpoint: 'http://127.0.0.1:11434', + ollamaModelName: 'gemma2', + keepAliveTime: '5m', + // advanced maxResponseTokenLength: 1000, @@ -281,6 +287,7 @@ export const defaultConfig = { 'moonshotWebFree', 'chatglmTurbo', 'customModel', + 'ollamaModel', 'azureOpenAi', ], activeSelectionTools: ['translate', 'summary', 'polish', 'code', 'ask'], @@ -381,6 +388,10 @@ export function isUsingCustomModel(configOrSession) { return customApiModelKeys.includes(configOrSession.modelName) } +export function isUsingOllamaModel(configOrSession) { + return ollamaApiModelKeys.includes(configOrSession.modelName) +} + export function isUsingChatGLMApi(configOrSession) { return chatglmApiModelKeys.includes(configOrSession.modelName) } diff --git a/src/content-script/index.jsx b/src/content-script/index.jsx index 62b2b52c..7962d268 100644 --- a/src/content-script/index.jsx +++ b/src/content-script/index.jsx @@ -72,7 +72,7 @@ async function mountComponent(siteConfig, userConfig) { }) const position = { - x: window.innerWidth - 300 - (Math.floor((20 / 100) * window.innerWidth)), + x: window.innerWidth - 300 - Math.floor((20 / 100) * window.innerWidth), y: window.innerHeight / 2 - 200, } const toolbarContainer = 
createElementAtPosition(position.x, position.y) diff --git a/src/manifest.json b/src/manifest.json index 95a23b35..4acc2ac6 100644 --- a/src/manifest.json +++ b/src/manifest.json @@ -1,7 +1,7 @@ { "name": "ChatGPTBox", "description": "Integrating ChatGPT into your browser deeply, everything you need is here", - "version": "2.5.6", + "version": "2.5.7", "manifest_version": 3, "icons": { "16": "logo.png", diff --git a/src/manifest.v2.json b/src/manifest.v2.json index 80fd9a31..8f2c4968 100644 --- a/src/manifest.v2.json +++ b/src/manifest.v2.json @@ -1,7 +1,7 @@ { "name": "ChatGPTBox", "description": "Integrating ChatGPT into your browser deeply, everything you need is here", - "version": "2.5.6", + "version": "2.5.7", "manifest_version": 2, "icons": { "16": "logo.png", diff --git a/src/popup/sections/AdvancedPart.jsx b/src/popup/sections/AdvancedPart.jsx index 4149528b..4623fd75 100644 --- a/src/popup/sections/AdvancedPart.jsx +++ b/src/popup/sections/AdvancedPart.jsx @@ -1,5 +1,7 @@ +import '../styles.scss' import { useTranslation } from 'react-i18next' import { parseFloatWithClamp, parseIntWithClamp } from '../../utils/index.mjs' +import { isUsingOllamaModel } from '../../config/index.mjs' import PropTypes from 'prop-types' import { Tab, TabList, TabPanel, Tabs } from 'react-tabs' import Browser from 'webextension-polyfill' @@ -56,6 +58,49 @@ function ApiParams({ config, updateConfig }) { }} /> + {isUsingOllamaModel(config) && ( + + )} ) } diff --git a/src/popup/sections/GeneralPart.jsx b/src/popup/sections/GeneralPart.jsx index a280317b..045a9a97 100644 --- a/src/popup/sections/GeneralPart.jsx +++ b/src/popup/sections/GeneralPart.jsx @@ -8,6 +8,7 @@ import { isUsingClaudeApi, isUsingCustomModel, isUsingCustomNameOnlyModel, + isUsingOllamaModel, isUsingGithubThirdPartyApi, isUsingMultiModeModel, ModelMode, @@ -163,6 +164,7 @@ export function GeneralPart({ config, updateConfig }) { isUsingOpenAiApiKey(config) || isUsingMultiModeModel(config) || 
isUsingCustomModel(config) || + isUsingOllamaModel(config) || isUsingAzureOpenAi(config) || isUsingClaudeApi(config) || isUsingCustomNameOnlyModel(config) || @@ -271,6 +273,18 @@ export function GeneralPart({ config, updateConfig }) { }} /> )} + {isUsingOllamaModel(config) && ( + { + const ollamaModelName = e.target.value + updateConfig({ ollamaModelName: ollamaModelName }) + }} + /> + )} {isUsingAzureOpenAi(config) && ( )} + {isUsingOllamaModel(config) && ( + { + const value = e.target.value + updateConfig({ ollamaEndpoint: value }) + }} + /> + )} + {isUsingOllamaModel(config) && ( + { + const apiKey = e.target.value + updateConfig({ ollamaApiKey: apiKey }) + }} + /> + )} {isUsingAzureOpenAi(config) && ( { + finished = true + pushRecord(session, question, answer) + console.debug('conversation history', { content: session.conversationRecords }) + port.postMessage({ answer: null, done: true, session: session }) + } + await fetchSSE(`${apiUrl}/api/chat`, { + method: 'POST', + signal: controller.signal, + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${apiKey}`, + }, + body: JSON.stringify({ + messages: prompt, + model: modelName, + stream: true, + keep_alive: config.keepAliveTime === '-1' ? 
-1 : config.keepAliveTime, + }), + onMessage(message) { + console.debug('sse message', message) + if (finished) return + let data = message + const delta = data.message?.content + if (delta) { + answer += delta + port.postMessage({ answer: answer, done: false, session: null }) + } + if (data.done_reason) { + finish() + return + } + }, + async onStart() {}, + async onEnd() { + port.postMessage({ done: true }) + port.onMessage.removeListener(messageListener) + port.onDisconnect.removeListener(disconnectListener) + }, + async onError(resp) { + port.onMessage.removeListener(messageListener) + port.onDisconnect.removeListener(disconnectListener) + if (resp instanceof Error) throw resp + const error = await resp.json().catch(() => ({})) + throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`) + }, + }) +} diff --git a/src/utils/fetch-ollama.mjs b/src/utils/fetch-ollama.mjs new file mode 100644 index 00000000..49fbd331 --- /dev/null +++ b/src/utils/fetch-ollama.mjs @@ -0,0 +1,26 @@ +export async function fetchSSE(resource, options) { + const { onMessage, onStart, onEnd, onError, ...fetchOptions } = options + const resp = await fetch(resource, fetchOptions).catch(async (err) => { + await onError(err) + }) + if (!resp) return + if (!resp.ok) { + await onError(resp) + return + } + let hasStarted = false + const reader = resp.body.getReader() + let result + while (!(result = await reader.read()).done) { + const chunk = result.value + const str = new TextDecoder().decode(chunk) + if (!hasStarted) { + hasStarted = true + await onStart(str) + } + let data = JSON.parse(str) + onMessage(data) + if (data.done) break + } + await onEnd() +}