diff --git a/autogen/oai/bedrock.py b/autogen/oai/bedrock.py
index 21253b92d1..808aad5d76 100644
--- a/autogen/oai/bedrock.py
+++ b/autogen/oai/bedrock.py
@@ -96,7 +96,23 @@ def __init__(self, **kwargs: Any):
         if "response_format" in kwargs and kwargs["response_format"] is not None:
             warnings.warn("response_format is not supported for Bedrock, it will be ignored.", UserWarning)
 
-        self.bedrock_runtime = session.client(service_name="bedrock-runtime", config=bedrock_config)
+        # If no access key or secret key was provided via arguments or environment variables,
+        if (
+            self._aws_access_key is None
+            or self._aws_access_key == ""
+            or self._aws_secret_key is None
+            or self._aws_secret_key == ""
+        ):
+            # attempt to create the client from the attached role of a managed service (Lambda, EC2, ECS, etc.)
+            self.bedrock_runtime = boto3.client(service_name="bedrock-runtime", config=bedrock_config)
+        else:
+            session = boto3.Session(
+                aws_access_key_id=self._aws_access_key,
+                aws_secret_access_key=self._aws_secret_key,
+                aws_session_token=self._aws_session_token,
+                profile_name=self._aws_profile_name,
+            )
+            self.bedrock_runtime = session.client(service_name="bedrock-runtime", config=bedrock_config)
 
     def message_retrieval(self, response):
         """Retrieve the messages from the response."""
diff --git a/website/docs/topics/non-openai-models/cloud-bedrock.ipynb b/website/docs/topics/non-openai-models/cloud-bedrock.ipynb
index 71c1e2e7ff..db792516c6 100644
--- a/website/docs/topics/non-openai-models/cloud-bedrock.ipynb
+++ b/website/docs/topics/non-openai-models/cloud-bedrock.ipynb
@@ -10,26 +10,26 @@
    "source": [
     "# Amazon Bedrock\n",
     "\n",
-    "AutoGen allows you to use Amazon's generative AI Bedrock service to run inference with a number of open-weight models and as well as their own models.\n",
+    "AG2 allows you to use Amazon's generative AI Bedrock service to run inference with a number of open-weight models as well as Amazon's own models.\n",
     "\n",
     "Amazon Bedrock supports models from providers such as Meta, Anthropic, Cohere, and Mistral.\n",
     "\n",
-    "In this notebook, we demonstrate how to use Anthropic's Sonnet model for AgentChat in AutoGen.\n",
+    "In this notebook, we demonstrate how to use Anthropic's Sonnet model for AgentChat in AG2.\n",
     "\n",
     "## Model features / support\n",
     "\n",
-    "Amazon Bedrock supports a wide range of models, not only for text generation but also for image classification and generation. Not all features are supported by AutoGen or by the Converse API used. Please see [Amazon's documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features) on the features supported by the Converse API.\n",
+    "Amazon Bedrock supports a wide range of models, not only for text generation but also for image classification and generation. Not all features are supported by AG2 or by the Converse API used. Please see [Amazon's documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html#conversation-inference-supported-models-features) on the features supported by the Converse API.\n",
     "\n",
-    "At this point in time AutoGen supports text generation and image classification (passing images to the LLM).\n",
+    "At this point in time, AG2 supports text generation and image classification (passing images to the LLM).\n",
     "\n",
     "It does not, yet, support image generation ([contribute](https://microsoft.github.io/autogen/docs/contributor-guide/contributing/)).\n",
     "\n",
     "## Requirements\n",
-    "To use Amazon Bedrock with AutoGen, first you need to install the `pyautogen[bedrock]` package.\n",
+    "To use Amazon Bedrock with AG2, you first need to install the `ag2[bedrock]` package.\n",
     "\n",
     "## Pricing\n",
     "\n",
-    "When we combine the number of models supported and costs being on a per-region basis, it's not feasible to maintain the costs for each model+region combination within the AutoGen implementation. Therefore, it's recommended that you add the following to your config with cost per 1,000 input and output tokens, respectively:\n",
+    "Because Bedrock supports many models and costs vary by region, it isn't feasible to maintain the costs for every model+region combination within the AG2 implementation. Therefore, it's recommended that you add the following to your config, with the cost per 1,000 input and output tokens, respectively:\n",
     "```\n",
     "{\n",
     "    ...\n",
@@ -125,6 +125,15 @@
     "```"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using within an AWS Lambda function\n",
+    "\n",
+    "If you are running your AG2 code within an AWS Lambda function, you can use the function's attached role to access the Bedrock service, so you do not need to provide access key, secret key, token, or profile values."
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
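For reference, a minimal sketch of what the new credential fallback enables inside a Lambda function. The config keys follow the format documented in the Bedrock notebook; the model ID, region, prices, and the event payload shape (`event["prompt"]`) are illustrative assumptions, not part of this PR:

```python
# Inside an AWS Lambda function: no aws_access_key / aws_secret_key entries are
# needed. With this change, the Bedrock client falls back to boto3's default
# credential chain, which resolves the Lambda execution role automatically.
from autogen import ConversableAgent

llm_config = {
    "config_list": [
        {
            "api_type": "bedrock",
            "model": "anthropic.claude-3-sonnet-20240229-v1:0",  # illustrative model ID
            "aws_region": "us-east-1",  # illustrative region
            "price": [0.003, 0.015],  # illustrative cost per 1,000 input/output tokens
        }
    ]
}

def lambda_handler(event, context):
    agent = ConversableAgent(name="assistant", llm_config=llm_config)
    # event["prompt"] is an assumed payload field for this sketch
    reply = agent.generate_reply(messages=[{"role": "user", "content": event["prompt"]}])
    return {"statusCode": 200, "body": reply}
```

Note that the Lambda execution role still needs permission to invoke the chosen model (e.g. `bedrock:InvokeModel`).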