Remove FastAPI and Supabase from library #29

Merged (3 commits) on Oct 18, 2023
2 changes: 0 additions & 2 deletions .env.example
@@ -1,5 +1,3 @@
-DATABASE_URL=
-DATABASE_MIGRATION_URL=
 OPENAI_API_KEY=
 HF_API_KEY=
 PINECONE_API_KEY=
Binary file removed dist/nagato_ai-0.0.1-py3-none-any.whl
Binary file not shown.
Binary file removed dist/nagato_ai-0.0.1.tar.gz
Binary file not shown.
Binary file added dist/nagato_ai-0.0.5-py3-none-any.whl
Binary file not shown.
Binary file added dist/nagato_ai-0.0.5.tar.gz
Binary file not shown.
20 changes: 2 additions & 18 deletions lib/__init__.py
@@ -1,19 +1,3 @@
-import asyncio
+# flake8: noqa

-from lib.models.ingest import IngestRequest
-from lib.service.flows import create_finetune
-
-
-async def ingest(payload: IngestRequest):
-    """Ingest data into pipeline"""
-
-    async def run_training_flow():
-        try:
-            await create_finetune(
-                payload=payload,
-            )
-        except Exception as flow_exception:
-            raise flow_exception
-
-    asyncio.create_task(run_training_flow())
-    return {"success": True}
+from .service import create_finetuned_model, create_vector_embeddings
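With the async ingest() wrapper removed, the package root now just re-exports two synchronous functions. A caller that still wants the old fire-and-forget behavior can move the work onto a thread itself; a minimal sketch assuming Python 3.9+ (for asyncio.to_thread), with hypothetical argument values:

import asyncio

from lib import create_vector_embeddings


async def ingest_in_background() -> None:
    # Run the now-synchronous pipeline off the event loop, roughly what the
    # removed asyncio.create_task() wrapper used to do for callers.
    await asyncio.to_thread(
        create_vector_embeddings,
        type="PDF",                              # one of TXT / PDF / MARKDOWN
        finetune_id="demo-namespace",            # hypothetical namespace id
        url="https://example.com/handbook.pdf",  # hypothetical document URL
    )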
12 changes: 0 additions & 12 deletions lib/models/ingest.py

This file was deleted.

48 changes: 48 additions & 0 deletions lib/service/__init__.py
@@ -0,0 +1,48 @@
+from typing import List, Union
+
+import openai
+from llama_index import Document
+
+from lib.service.embedding import EmbeddingService
+from lib.service.finetune import get_finetuning_service
+
+
+def create_vector_embeddings(
+    type: str, finetune_id: str, url: str = None, content: str = None
+) -> List[Union[Document, None]]:
+    embedding_service = EmbeddingService(type=type, content=content, url=url)
+    documents = embedding_service.generate_documents()
+    nodes = embedding_service.generate_chunks(documents=documents)
+    embedding_service.generate_embeddings(nodes=nodes, finetune_id=finetune_id)
+    return nodes
+
+
+def create_finetuned_model(
+    provider: str,
+    base_model: str,
+    type: str,
+    url: str = None,
+    content: str = None,
+    webhook_url: str = None,
+):
+    embedding_service = EmbeddingService(type=type, url=url, content=content)
+    documents = embedding_service.generate_documents()
+    nodes = embedding_service.generate_chunks(documents=documents)
+    finetunning_service = get_finetuning_service(
+        nodes=nodes,
+        provider=provider,
+        batch_size=5,
+        base_model=base_model,
+        num_questions_per_chunk=1,
+    )
+    training_file = finetunning_service.generate_dataset()
+    formatted_training_file = finetunning_service.validate_dataset(
+        training_file=training_file
+    )
+    finetune = finetunning_service.finetune(
+        training_file=formatted_training_file, webhook_url=webhook_url
+    )
+    if provider == "OPENAI":
+        finetune = openai.FineTune.retrieve(id=finetune.get("id"))
+        finetunning_service.cleanup(training_file=finetune.get("training_file"))
+    return finetune
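Taken together, lib/service/__init__.py is now the library's public surface: create_vector_embeddings chunks a document and upserts its vectors, while create_finetuned_model builds a synthetic Q&A dataset from the same chunks and launches a fine-tune. A usage sketch with hypothetical URL, namespace, and base-model values (only the "OPENAI" provider branch appears in this diff):

from lib import create_finetuned_model, create_vector_embeddings

# Chunk a remote PDF and store its embeddings under a per-finetune namespace.
nodes = create_vector_embeddings(
    type="PDF",
    finetune_id="demo-namespace",            # hypothetical
    url="https://example.com/handbook.pdf",  # hypothetical
)
print(f"embedded {len(nodes)} chunks")

# Build a Q&A dataset from the same document and kick off a fine-tuning job.
finetune = create_finetuned_model(
    provider="OPENAI",
    base_model="gpt-3.5-turbo",              # hypothetical; whatever the service accepts
    type="PDF",
    url="https://example.com/handbook.pdf",  # hypothetical
)
print(finetune.get("id"))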
27 changes: 13 additions & 14 deletions lib/service/embedding.py
@@ -8,48 +8,47 @@
 from numpy import ndarray
 from sentence_transformers import SentenceTransformer

-from lib.models.ingest import IngestRequest
 from lib.service.vectordb import get_vector_service


 class EmbeddingService:
-    def __init__(self, payload: IngestRequest):
-        self.payload = payload
+    def __init__(self, type: str, url: str = None, content: str = None):
+        self.type = type
+        self.url = url
+        self.content = content

     def get_datasource_suffix(self) -> str:
         suffixes = {"TXT": ".txt", "PDF": ".pdf", "MARKDOWN": ".md"}
         try:
-            return suffixes[self.payload.type]
+            return suffixes[self.type]
         except KeyError:
             raise ValueError("Unsupported datasource type")

-    async def generate_documents(self) -> List[Document]:
+    def generate_documents(self) -> List[Document]:
         with NamedTemporaryFile(
             suffix=self.get_datasource_suffix(), delete=True
         ) as temp_file:
-            if self.payload.url:
-                content = requests.get(self.payload.url).content
+            if self.url:
+                content = requests.get(self.url).content
             else:
-                content = self.payload.content
+                content = self.content
             temp_file.write(content)
             temp_file.flush()
             reader = SimpleDirectoryReader(input_files=[temp_file.name])
             docs = reader.load_data()
         return docs

-    async def generate_chunks(
-        self, documents: List[Document]
-    ) -> List[Union[Document, None]]:
+    def generate_chunks(self, documents: List[Document]) -> List[Union[Document, None]]:
         parser = SimpleNodeParser.from_defaults(chunk_size=350, chunk_overlap=20)
         nodes = parser.get_nodes_from_documents(documents, show_progress=True)
         return nodes

-    async def generate_embeddings(
+    def generate_embeddings(
         self,
         nodes: List[Union[Document, None]],
         finetune_id: str,
     ) -> List[ndarray]:
-        vectordb = await get_vector_service(
+        vectordb = get_vector_service(
             provider="pinecone",
             index_name="all-minilm-l6-v2",
             namespace=finetune_id,
@@ -67,7 +66,7 @@ async def generate_embeddings(
                 {**node.metadata, "content": node.text},
             )
             embeddings.append(embedding)
-        await vectordb.upsert(vectors=embeddings)
+        vectordb.upsert(vectors=embeddings)
         return embeddings

     # def generate_query(self):
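Since every EmbeddingService method is now synchronous, the class can also be driven step by step without an event loop. A sketch assuming inline text content (bytes, because generate_documents writes it straight to a temp file; the finetune_id is hypothetical):

from lib.service.embedding import EmbeddingService

# The same pipeline the service-level functions run, one step at a time.
service = EmbeddingService(type="TXT", content=b"Some raw text to index.")
documents = service.generate_documents()              # load into llama_index Documents
nodes = service.generate_chunks(documents=documents)  # chunk_size=350, overlap 20
service.generate_embeddings(nodes=nodes, finetune_id="demo-namespace")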