proxy.py
from common import LLM_API, LLM_MODEL
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel

# The interpreter library is assumed to be installed and configured
from interpreter import interpreter
app = FastAPI()

# Interpreter configuration
interpreter.llm.model = LLM_MODEL
interpreter.llm.api_base = LLM_API
interpreter.offline = True  # Disables online features like Open Procedures
interpreter.auto_run = True  # Execute generated code without asking for confirmation
interpreter.loop = False
interpreter.verbose = True
interpreter.llm.context_window = 1600
interpreter.llm.max_tokens = 100
interpreter.llm.max_output = 100
print(interpreter.system_message)

# (Tip: Do this before adding/removing languages, otherwise OI might retain the state of previous languages:)
interpreter.computer.terminate()
class RequestModel(BaseModel):
    # NOTE: currently unused -- the /talk handler below reads the raw request
    # body instead of parsing JSON into this model.
    text: str


@app.post("/talk")
async def talk(request: Request):
    try:
        data = await request.body()
        print(data)
        # Hand the request text to the interpreter
        result = interpreter.chat(data.decode("utf-8"))
        print(result)
        if not result:
            raise HTTPException(
                status_code=500, detail="Interpreter did not return a result"
            )
        # The last message in the list holds the interpreter's final reply
        response_text = result[-1].get("content")
        print(response_text)
        return {"response": response_text}
    except HTTPException:
        # Re-raise HTTP errors as-is rather than wrapping them below
        raise
    except Exception as e:
        print(e)
        raise HTTPException(status_code=500, detail=str(e))
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=9888)
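
For reference, a minimal client sketch for exercising the /talk endpoint. It is not part of proxy.py; it assumes the server is running locally on port 9888 and uses the third-party requests package:

import requests

# The handler decodes the raw request body itself, so send plain UTF-8 text
resp = requests.post(
    "http://localhost:9888/talk",
    data="List the files in the current directory".encode("utf-8"),
    timeout=120,  # interpreter.chat can take a while when it executes code
)
resp.raise_for_status()
print(resp.json()["response"])

Since the endpoint reads the raw body rather than a JSON payload, RequestModel never comes into play; wiring it in (e.g. `async def talk(request: RequestModel)` with a `{"text": ...}` JSON body) would be the more idiomatic FastAPI design, at the cost of changing the client protocol above.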