From 1ba9a5b7268cf762b2376def9d7df82cdcad88b7 Mon Sep 17 00:00:00 2001
From: Bagatur
Date: Mon, 8 Apr 2024 10:13:03 -0500
Subject: [PATCH] docs: standardize vertexai params

---
 cookbook/Multi_modal_RAG_google.ipynb                   |  8 +++-----
 .../docs/integrations/chat/google_vertex_ai_palm.ipynb  | 10 ++++------
 .../docs/integrations/llms/google_vertex_ai_palm.ipynb  |  4 ++--
 3 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb
index e2b88b5317cab..5a1acb30070d2 100644
--- a/cookbook/Multi_modal_RAG_google.ipynb
+++ b/cookbook/Multi_modal_RAG_google.ipynb
@@ -185,7 +185,7 @@
     "    )\n",
     "    # Text summary chain\n",
     "    model = VertexAI(\n",
-    "        temperature=0, model_name=\"gemini-pro\", max_output_tokens=1024\n",
+    "        temperature=0, model_name=\"gemini-pro\", max_tokens=1024\n",
     "    ).with_fallbacks([empty_response])\n",
     "    summarize_chain = {\"element\": lambda x: x} | prompt | model | StrOutputParser()\n",
     "\n",
@@ -254,7 +254,7 @@
     "\n",
     "def image_summarize(img_base64, prompt):\n",
     "    \"\"\"Make image summary\"\"\"\n",
-    "    model = ChatVertexAI(model_name=\"gemini-pro-vision\", max_output_tokens=1024)\n",
+    "    model = ChatVertexAI(model=\"gemini-pro-vision\", max_tokens=1024)\n",
     "\n",
     "    msg = model(\n",
     "        [\n",
@@ -553,9 +553,7 @@
     "    \"\"\"\n",
     "\n",
     "    # Multi-modal LLM\n",
-    "    model = ChatVertexAI(\n",
-    "        temperature=0, model_name=\"gemini-pro-vision\", max_output_tokens=1024\n",
-    "    )\n",
+    "    model = ChatVertexAI(temperature=0, model_name=\"gemini-pro-vision\", max_tokens=1024)\n",
     "\n",
     "    # RAG pipeline\n",
     "    chain = (\n",
diff --git a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb
index ecc7a653a0d92..6074c5154a05f 100644
--- a/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb
+++ b/docs/docs/integrations/chat/google_vertex_ai_palm.ipynb
@@ -114,7 +114,7 @@
     "human = \"Translate this sentence from English to French. I love programming.\"\n",
     "prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
     "\n",
-    "chat = ChatVertexAI(model_name=\"gemini-pro\", convert_system_message_to_human=True)\n",
+    "chat = ChatVertexAI(model=\"gemini-pro\", convert_system_message_to_human=True)\n",
     "\n",
     "chain = prompt | chat\n",
     "chain.invoke({})"
@@ -233,9 +233,7 @@
     }
    ],
    "source": [
-    "chat = ChatVertexAI(\n",
-    "    model_name=\"codechat-bison\", max_output_tokens=1000, temperature=0.5\n",
-    ")\n",
+    "chat = ChatVertexAI(model=\"codechat-bison\", max_tokens=1000, temperature=0.5)\n",
     "\n",
     "message = chat.invoke(\"Write a Python function generating all prime numbers\")\n",
     "print(message.content)"
@@ -366,7 +364,7 @@
     "from langchain.pydantic_v1 import BaseModel\n",
     "from langchain_google_vertexai import create_structured_runnable\n",
     "\n",
-    "llm = ChatVertexAI(model_name=\"gemini-pro\")\n",
+    "llm = ChatVertexAI(model=\"gemini-pro\")\n",
     "\n",
     "\n",
     "class MyModel(BaseModel):\n",
@@ -424,7 +422,7 @@
     "human = \"{text}\"\n",
     "prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", human)])\n",
     "\n",
-    "chat = ChatVertexAI(model_name=\"chat-bison\", max_output_tokens=1000, temperature=0.5)\n",
+    "chat = ChatVertexAI(model=\"chat-bison\", max_tokens=1000, temperature=0.5)\n",
     "chain = prompt | chat\n",
     "\n",
     "asyncio.run(\n",
diff --git a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
index 4f53a75c925b4..c6c10fdb998ec 100644
--- a/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
+++ b/docs/docs/integrations/llms/google_vertex_ai_palm.ipynb
@@ -311,7 +311,7 @@
     }
    ],
    "source": [
-    "llm = VertexAI(model_name=\"code-bison\", max_output_tokens=1000, temperature=0.3)\n",
+    "llm = VertexAI(model_name=\"code-bison\", max_tokens=1000, temperature=0.3)\n",
     "question = \"Write a python function that checks if a string is a valid email address\"\n",
     "print(model.invoke(question))"
    ]
@@ -347,7 +347,7 @@
     "from langchain_core.messages import HumanMessage\n",
     "from langchain_google_vertexai import ChatVertexAI\n",
     "\n",
-    "llm = ChatVertexAI(model_name=\"gemini-ultra-vision\")\n",
+    "llm = ChatVertexAI(model=\"gemini-ultra-vision\")\n",
     "\n",
     "image_message = {\n",
     "    \"type\": \"image_url\",\n",
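
Note: every hunk above makes the same substitution — the notebook snippets move from the provider-specific max_output_tokens= keyword to the standardized max_tokens=, and the ChatVertexAI examples also move from model_name= to model= (the VertexAI examples keep model_name=). A minimal sketch of the post-patch usage follows, assuming langchain-google-vertexai is installed and Google Cloud credentials are already configured; the model names simply mirror the notebooks touched by this patch.

    # Sketch of the standardized constructor arguments used throughout this patch.
    # Assumes `langchain-google-vertexai` is installed and application-default
    # credentials are configured; model names mirror the notebooks above.
    from langchain_google_vertexai import ChatVertexAI, VertexAI

    # Completion-style model: keeps model_name=, picks up the standardized max_tokens=.
    llm = VertexAI(model_name="code-bison", max_tokens=1000, temperature=0.3)

    # Chat model: standardized model= and max_tokens=.
    chat = ChatVertexAI(model="gemini-pro", max_tokens=1024, temperature=0)

    print(llm.invoke("Write a python function that checks if a string is a valid email address"))
    print(chat.invoke("Translate this sentence from English to French. I love programming.").content)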