From e3a04e3fdbef2de256c671a60cae66b8dc0ef2c5 Mon Sep 17 00:00:00 2001
From: Benjamin Liu
Date: Mon, 22 Jan 2024 09:18:56 +0800
Subject: [PATCH 1/2] MAX-32464 Add log to check cost-not-reported issue on zinc-related instances

---
 libs/community/langchain_community/callbacks/openai_info.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libs/community/langchain_community/callbacks/openai_info.py b/libs/community/langchain_community/callbacks/openai_info.py
index bf0c59b746ea0..ba88f50cc9b20 100644
--- a/libs/community/langchain_community/callbacks/openai_info.py
+++ b/libs/community/langchain_community/callbacks/openai_info.py
@@ -189,12 +189,14 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         completion_tokens = token_usage.get("completion_tokens", 0)
         prompt_tokens = token_usage.get("prompt_tokens", 0)
         model_name = standardize_model_name(response.llm_output.get("model_name", ""))
+        print(f"Report metrics cost issue check: {model_name}, {response.llm_output.get('model_name', '')}, {model_name in MODEL_COST_PER_1K_TOKENS}")  # noqa: E501
         if model_name in MODEL_COST_PER_1K_TOKENS:
             completion_cost = get_openai_token_cost_for_model(
                 model_name, completion_tokens, is_completion=True
             )
             prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
             self.total_cost += prompt_cost + completion_cost
+            print(f"Report metrics cost issue check: completion_cost {completion_cost}, prompt_cost {prompt_cost}, total_cost {self.total_cost}")  # noqa: E501
         self.total_tokens += token_usage.get("total_tokens", 0)
         self.prompt_tokens += prompt_tokens
         self.completion_tokens += completion_tokens

From b6b29433a7a436426961414e27491d3fa14b89f0 Mon Sep 17 00:00:00 2001
From: Jonathan Xi
Date: Mon, 22 Jan 2024 10:21:38 +0800
Subject: [PATCH 2/2] Update openai_info.py: add TODO comment

---
 libs/community/langchain_community/callbacks/openai_info.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libs/community/langchain_community/callbacks/openai_info.py b/libs/community/langchain_community/callbacks/openai_info.py
index ba88f50cc9b20..f62cc62823ee8 100644
--- a/libs/community/langchain_community/callbacks/openai_info.py
+++ b/libs/community/langchain_community/callbacks/openai_info.py
@@ -189,6 +189,7 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         completion_tokens = token_usage.get("completion_tokens", 0)
         prompt_tokens = token_usage.get("prompt_tokens", 0)
         model_name = standardize_model_name(response.llm_output.get("model_name", ""))
+        # TODO Remove the following test log
         print(f"Report metrics cost issue check: {model_name}, {response.llm_output.get('model_name', '')}, {model_name in MODEL_COST_PER_1K_TOKENS}")  # noqa: E501
         if model_name in MODEL_COST_PER_1K_TOKENS:
             completion_cost = get_openai_token_cost_for_model(
@@ -196,6 +197,7 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
             )
             prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
             self.total_cost += prompt_cost + completion_cost
+            # TODO Remove the following test log
             print(f"Report metrics cost issue check: completion_cost {completion_cost}, prompt_cost {prompt_cost}, total_cost {self.total_cost}")  # noqa: E501
         self.total_tokens += token_usage.get("total_tokens", 0)
         self.prompt_tokens += prompt_tokens