# models_config.sample.yaml
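# Field reference (a hedged summary inferred from the sample entries below; see
# the project documentation for the authoritative schema):
#   provider_name       - unique name used to reference this provider elsewhere
#   api_format          - wire protocol the provider speaks (all samples use the
#                         OpenAI-compatible format)
#   base_url            - endpoint of the model server or gateway (omitted in the
#                         OpenAI example to use the provider's default endpoint)
#   api_key_env_var     - environment variable holding the API key ("" if none)
#   llm_model_ids       - chat / completion model identifiers served by the provider
#   embedding_model_ids - embedding model identifiers
#   reranking_model_ids - reranking model identifiers
#   audio_model_ids     - audio transcription model identifiers (see faster-whisper)
#   default_headers     - extra HTTP headers sent with every request to the provider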
model_providers:
  ############################ Local ############################################
  # Keep these providers if you want to serve models locally via the Ollama    #
  # and Infinity model servers                                                 #
  ###############################################################################
  - provider_name: local-ollama
    api_format: openai
    base_url: http://ollama-server:11434/v1/
    api_key_env_var: ""
    llm_model_ids:
      - "qwen2:1.5b"
    embedding_model_ids: []
    reranking_model_ids: []
    default_headers: {}
  - provider_name: local-infinity
    api_format: openai
    base_url: http://infinity-server:7997/
    api_key_env_var: INFINITY_API_KEY
    llm_model_ids: []
    embedding_model_ids:
      - "mixedbread-ai/mxbai-embed-large-v1"
    reranking_model_ids:
      - "mixedbread-ai/mxbai-rerank-xsmall-v1"
    default_headers: {}
  - provider_name: faster-whisper
    api_format: openai
    base_url: http://faster-whisper:8000
    api_key_env_var: ""
    llm_model_ids: []
    embedding_model_ids: []
    reranking_model_ids: []
    audio_model_ids:
      - "Systran/faster-distil-whisper-large-v3"
    default_headers: {}
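  # NOTE (assumption): the hostnames above (ollama-server, infinity-server,
  # faster-whisper) are expected to resolve on the container network, e.g. as
  # docker compose service names; point base_url at your own servers otherwise.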
  ############################ OpenAI ###########################################
  # Uncomment this provider if you want to use OpenAI as a model provider      #
  # Remember to set `OPENAI_API_KEY` in the container environment              #
  ###############################################################################
  # - provider_name: openai
  #   api_format: openai
  #   api_key_env_var: OPENAI_API_KEY
  #   llm_model_ids:
  #     - "gpt-3.5-turbo"
  #     - "gpt-4o"
  #   embedding_model_ids:
  #     - "text-embedding-3-small"
  #     - "text-embedding-ada-002"
  #   reranking_model_ids: []
  #   default_headers: {}
  ############################ TrueFoundry ######################################
  # Uncomment this provider if you want to use TrueFoundry as a model provider #
  # Remember to set `TFY_API_KEY` in the container environment                 #
  ###############################################################################
  # - provider_name: truefoundry
  #   api_format: openai
  #   base_url: https://llm-gateway.truefoundry.com/api/inference/openai
  #   api_key_env_var: TFY_API_KEY
  #   llm_model_ids:
  #     - "openai-main/gpt-4o-mini"
  #     - "openai-main/gpt-4-turbo"
  #     - "openai-main/gpt-3-5-turbo"
  #   embedding_model_ids:
  #     - "openai-main/text-embedding-3-small"
  #     - "openai-main/text-embedding-ada-002"
  #   reranking_model_ids: []
  #   default_headers: {}
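  ############################ Other OpenAI-compatible servers ##################
  # A hypothetical sketch of how any additional OpenAI-compatible endpoint     #
  # could be registered; the provider name, URL and model ids below are        #
  # illustrative placeholders, not part of the original sample                 #
  ###############################################################################
  # - provider_name: my-openai-compatible
  #   api_format: openai
  #   base_url: http://my-model-server:8080/v1/
  #   api_key_env_var: MY_MODEL_SERVER_API_KEY
  #   llm_model_ids:
  #     - "my-chat-model"
  #   embedding_model_ids: []
  #   reranking_model_ids: []
  #   default_headers: {}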