diff --git a/litellm/proxy/test_openai_request.py b/litellm/proxy/test_openai_request.py
new file mode 100644
index 000000000000..97f89c232c8b
--- /dev/null
+++ b/litellm/proxy/test_openai_request.py
@@ -0,0 +1,15 @@
+import openai
+
+# Point the standard OpenAI client at the local litellm proxy; the proxy
+# ignores the API key, so any placeholder value works.
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:8000",
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(model="gpt-3.5-turbo", messages=[
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem",
+    }
+])
+
+print(response)