diff --git a/libs/community/langchain_community/callbacks/openai_info.py b/libs/community/langchain_community/callbacks/openai_info.py
index bf0c59b746ea0..f62cc62823ee8 100644
--- a/libs/community/langchain_community/callbacks/openai_info.py
+++ b/libs/community/langchain_community/callbacks/openai_info.py
@@ -189,12 +189,16 @@ def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
         completion_tokens = token_usage.get("completion_tokens", 0)
         prompt_tokens = token_usage.get("prompt_tokens", 0)
         model_name = standardize_model_name(response.llm_output.get("model_name", ""))
+        # TODO Remove the following test log
+        print(f"Report metrics cost issue check: {model_name}, {response.llm_output.get('model_name', '')}, {model_name in MODEL_COST_PER_1K_TOKENS}")  # noqa: E501
         if model_name in MODEL_COST_PER_1K_TOKENS:
             completion_cost = get_openai_token_cost_for_model(
                 model_name, completion_tokens, is_completion=True
             )
             prompt_cost = get_openai_token_cost_for_model(model_name, prompt_tokens)
             self.total_cost += prompt_cost + completion_cost
+            # TODO Remove the following test log
+            print(f"Report metrics cost issue check: completion_cost {completion_cost}, prompt_cost {prompt_cost}, total_cost {self.total_cost}")  # noqa: E501
         self.total_tokens += token_usage.get("total_tokens", 0)
         self.prompt_tokens += prompt_tokens
         self.completion_tokens += completion_tokens