Update chatbot using api #166

Merged: 5 commits, Jul 8, 2024
1 change: 1 addition & 0 deletions README.md
@@ -59,6 +59,7 @@ FFood is a food website that lets customers order food in a quick and convenient
- View Food by Categories.
- View Food Details.
- Rate Food.
- Chat bot.
- Search Food by keyword.
- Search Food by image.
- User cart Management.
13 changes: 13 additions & 0 deletions api/chatbot/Dockerfile
@@ -0,0 +1,13 @@
# Use an official Python runtime as a parent image
FROM tiangolo/uvicorn-gunicorn-fastapi:python3.9

COPY requirements.txt requirements.txt
RUN pip3 install -r requirements.txt
# Make the pure-Python protobuf setting available at runtime (ENV persists into the container, unlike RUN export)
ENV PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

COPY . .

EXPOSE 8100

# Run app.py when the container launches
CMD ["python3", "app.py"]
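The image is built and started with the usual docker build and docker run commands; as a rough sketch, the same steps can also be driven from Python with the Docker SDK. The "ffood-chatbot" tag and the build path below are illustrative assumptions, not names defined in this PR.

# Sketch: build and run the chatbot image via the Docker SDK (pip install docker).
# The "ffood-chatbot" tag and the "api/chatbot" build path are illustrative assumptions.
import docker

client = docker.from_env()
image, _ = client.images.build(path="api/chatbot", tag="ffood-chatbot")
container = client.containers.run(
    "ffood-chatbot",
    ports={"8100/tcp": 8100},  # publish the FastAPI port exposed in the Dockerfile
    detach=True,
)
print(container.short_id, container.status)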
35 changes: 35 additions & 0 deletions api/chatbot/app.py
@@ -0,0 +1,35 @@
import uvicorn
import nest_asyncio
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from chatbot_rag import rag, read_json

nest_asyncio.apply()

app = FastAPI()

# Build the search index once at startup.
file = 'food_description.json'
index = read_json(file)

# Add CORS Middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/rag/")
async def rag_endpoint(query: str):
    try:
        answer = rag(query, index)
        return {"answer": answer}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == '__main__':
    uvicorn.run('app:app', port=8100, host='0.0.0.0', loop='asyncio')
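Once the service is up (locally or in the container), the endpoint can be exercised with a minimal client sketch. The host, port, and sample query are assumptions; only the /rag/ route and its query parameter come from the code above.

# Minimal client sketch for the /rag/ endpoint (assumes the service listens on localhost:8100).
import requests

resp = requests.get(
    "http://localhost:8100/rag/",
    params={"query": "What vegetarian dishes do you have?"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["answer"])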
92 changes: 92 additions & 0 deletions api/chatbot/chatbot_rag.py
@@ -0,0 +1,92 @@
import minsearch
import json
from g4f.client import Client
import g4f

# Candidate g4f providers (declared here but not referenced anywhere below).
_providers = [
    g4f.Provider.Aichat,
    g4f.Provider.ChatBase,
    g4f.Provider.Bing,
    g4f.Provider.GptGo,
    g4f.Provider.You,
    g4f.Provider.Yqcloud,
]

def llm(prompt):
    client = Client()
    print("Creating chat completion...")
    chat_completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        ignored=["Ylokh", "GptGo", "AItianhu", "Aibn", "Myshell", "FreeGpt"],
        stream=True
    )

    response = ""
    print("Waiting for completion...")
    for completion in chat_completion:
        response += completion.choices[0].delta.content or ""

    return {"response": response}

def read_json(file):
    with open(file, 'rt', encoding='utf-8') as f_in:  # Specify UTF-8 encoding
        docs_raw = json.load(f_in)
    print(f"Read {len(docs_raw)} documents from {file}.")

    # Flatten each category's items into one list, tagging every item with its category.
    documents = []
    for course_dict in docs_raw:
        for doc in course_dict['items']:
            doc['category'] = course_dict['category']
            documents.append(doc)

    index = minsearch.Index(
        text_fields=["question", "description", "name"],
        keyword_fields=["category"]
    )
    index.fit(documents)
    print("Index has been initialized.")
    return index
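# Note: read_json() assumes food_description.json (not included in this PR) is a list of
# category objects, each carrying an "items" list, roughly of this shape:
#
#   [
#     {
#       "category": "FFood",
#       "items": [
#         {"name": "Pho Bo", "question": "What is Pho Bo?", "description": "Beef noodle soup with..."}
#       ]
#     }
#   ]
#
# Only the field names follow from the code above; the example values are illustrative.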

def search(query, index):
    if index is None:
        raise RuntimeError("Index has not been initialized. Call read_json() first.")

    # Boost question matches; note that 'section' is not among the indexed text fields.
    boost = {'question': 3.0, 'section': 0.5}

    results = index.search(
        query=query,
        filter_dict={'category': 'FFood'},
        boost_dict=boost,
        num_results=5
    )

    return results

def build_prompt(query, search_results):
    prompt_template = """
You're a Vietnamese food ordering assistant. Answer the QUESTION based on the CONTEXT from the food description database.
Use only the facts from the CONTEXT when answering the QUESTION.

QUESTION: {question}

CONTEXT:
{context}
""".strip()

    context = ""

    for doc in search_results:
        context = context + f"name: {doc['name']}\nquestion: {doc['question']}\nanswer: {doc['description']}\n\n"

    prompt = prompt_template.format(question=query, context=context).strip()
    return prompt

def rag(query, index):
    print(f"Searching for: {query}")
    search_results = search(query, index)
    print(f"Found {len(search_results)} search results.")
    prompt = build_prompt(query, search_results)
    answer = llm(prompt)
    return answer
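For completeness, a minimal sketch of running the RAG pipeline directly, without FastAPI; it assumes food_description.json sits next to the script and that one of the g4f providers responds.

# Sketch: exercise the module directly (assumes food_description.json is available locally).
from chatbot_rag import read_json, rag

index = read_json("food_description.json")
result = rag("Which dishes are spicy?", index)
print(result["response"])  # llm() wraps the streamed answer in a {"response": ...} dict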