diff --git a/docs/modules/llms/async_llm.ipynb b/docs/modules/llms/async_llm.ipynb index 47493b8e9e606..730d010210359 100644 --- a/docs/modules/llms/async_llm.ipynb +++ b/docs/modules/llms/async_llm.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "id": "f6574496-b360-4ffa-9523-7fd34a590164", "metadata": {}, @@ -9,7 +10,7 @@ "\n", "LangChain provides async support for LLMs by leveraging the [asyncio](https://docs.python.org/3/library/asyncio.html) library.\n", "\n", - "Async support is particularly useful for calling multiple LLMs concurrently, as these calls are network-bound. Currently, only `OpenAI` is supported, but async support for other LLMs is on the roadmap.\n", + "Async support is particularly useful for calling multiple LLMs concurrently, as these calls are network-bound. Currently, only `OpenAI` and `PromptLayerOpenAI` are supported, but async support for other LLMs is on the roadmap.\n", "\n", "You can use the `agenerate` method to call an OpenAI LLM asynchronously."
] diff --git a/langchain/llms/promptlayer_openai.py b/langchain/llms/promptlayer_openai.py index bdad7eab7691d..23cba853df385 100644 --- a/langchain/llms/promptlayer_openai.py +++ b/langchain/llms/promptlayer_openai.py @@ -53,3 +53,27 @@ def _generate( get_api_key(), ) return generated_responses + + async def _agenerate( + self, prompts: List[str], stop: Optional[List[str]] = None + ) -> LLMResult: + from promptlayer.utils import get_api_key, promptlayer_api_request + + request_start_time = datetime.datetime.now().timestamp() + generated_responses = await super()._agenerate(prompts, stop) + request_end_time = datetime.datetime.now().timestamp() + for i in range(len(prompts)): + prompt = prompts[i] + resp = generated_responses.generations[i] + promptlayer_api_request( + "langchain.PromptLayerOpenAI.async", + "langchain", + [prompt], + self._identifying_params, + self.pl_tags, + resp[0].text, + request_start_time, + request_end_time, + get_api_key(), + ) + return generated_responses