diff --git a/autorag/__init__.py b/autorag/__init__.py
index 3c1f9b7e7..df88f4acf 100644
--- a/autorag/__init__.py
+++ b/autorag/__init__.py
@@ -5,8 +5,10 @@
 from rich.logging import RichHandler
 
-from llama_index import OpenAIEmbedding
-from llama_index.llms import OpenAI
+from llama_index.embeddings import OpenAIEmbedding, HuggingFaceEmbedding
+from llama_index.embeddings.openai import OpenAIEmbeddingModelType
+from llama_index.llms import OpenAI, Anthropic, AzureOpenAI, HuggingFaceLLM, LangChainLLM, GradientBaseModelLLM, \
+    GradientModelAdapterLLM, LiteLLM, LlamaCPP, OpenAILike, OpenLLM, PaLM, PredibaseLLM, Replicate, Xinference
 
 root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 version_path = os.path.join(root_path, 'VERSION')
@@ -15,11 +17,32 @@
     __version__ = f.read().strip()
 
 embedding_models = {
-    'openai': OpenAIEmbedding(),
+    'openai': OpenAIEmbedding(),  # default model is OpenAIEmbeddingModelType.TEXT_EMBED_ADA_002
+    'openai_babbage': OpenAIEmbedding(model=OpenAIEmbeddingModelType.BABBAGE),
+    'openai_ada': OpenAIEmbedding(model=OpenAIEmbeddingModelType.ADA),
+    'openai_davinci': OpenAIEmbedding(model=OpenAIEmbeddingModelType.DAVINCI),
+    'openai_curie': OpenAIEmbedding(model=OpenAIEmbeddingModelType.CURIE),
+    # you can use your own model in this way.
+    'huggingface_baai_bge_small': HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
+    'huggingface_cointegrated_rubert_tiny2': HuggingFaceEmbedding(model_name="cointegrated/rubert-tiny2"),
 }
 
 generator_models = {
     'openai': OpenAI,
+    'anthropic': Anthropic,
+    'azureopenai': AzureOpenAI,
+    'huggingfacellm': HuggingFaceLLM,
+    'langchainllm': LangChainLLM,
+    'gradientbasemodelllm': GradientBaseModelLLM,
+    'gradientmodeladapterllm': GradientModelAdapterLLM,
+    'litellm': LiteLLM,
+    'llamacpp': LlamaCPP,
+    'openailike': OpenAILike,
+    'openllm': OpenLLM,
+    'palm': PaLM,
+    'predibasellm': PredibaseLLM,
+    'replicate': Replicate,
+    'xinference': Xinference,
 }
 
 rich_format = "[%(filename)s:%(lineno)s] >> %(message)s"
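
For reviewers, a minimal usage sketch of the two registries this patch extends. It is not part of the diff: the dictionary keys come from the code above, but the call sites (`get_text_embedding`, `complete`) and the constructor arguments are illustrative assumptions about how callers might consume the registries, since the rest of the package is not shown here.

```python
# Hypothetical consumption of the registries defined in autorag/__init__.py.
# Not part of this patch; model names and prompts below are placeholders.
from autorag import embedding_models, generator_models

# Embedding entries are pre-built instances, so they can be used directly.
embed_model = embedding_models['huggingface_baai_bge_small']
vector = embed_model.get_text_embedding("retrieval augmented generation")
print(len(vector))  # embedding dimensionality

# Generator entries are classes, so the caller still supplies model-specific
# kwargs (model name, credentials, temperature, ...) at instantiation time.
llm_cls = generator_models['openai']
llm = llm_cls(model="gpt-3.5-turbo", temperature=0.0)
print(llm.complete("Say hello in one word."))
```

The asymmetry visible in the diff looks deliberate: embedding entries are fully determined by their registry key and are stored as instances, while generator entries stay as classes because each backend still needs per-run arguments such as the concrete model name or endpoint.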