Fix regression with Azure OpenAI content filtering metadata
Resolved an issue where Azure OpenAI content filtering metadata was not being handled correctly due to recent refactoring. The previous fix was incorrectly applied to the `WrappedAnthropicClient` instead of the `WrappedOpenAIClient`. This update moves the necessary checks and processing for `content_filter_results` to the correct class, ensuring proper handling of content filtering metadata.
mentatai[bot] committed Aug 21, 2024
1 parent bbb827f commit 44824c1
Showing 1 changed file with 8 additions and 8 deletions.
spice/wrapped_clients.py: 16 changes (8 additions, 8 deletions)
@@ -107,6 +107,10 @@ def process_chunk(self, chunk, call_args: SpiceCallArgs):
         if chunk.usage is not None:
             input_tokens = chunk.usage.prompt_tokens
             output_tokens = chunk.usage.completion_tokens
+        # Handle content filtering metadata
+        if hasattr(chunk, 'content_filter_results'):
+            content_filter_results = chunk.content_filter_results
+            # Process content filter results as needed
         return content, input_tokens, output_tokens
 
     @override
@@ -116,6 +120,10 @@ def extract_text_and_tokens(self, chat_completion, call_args: SpiceCallArgs):
             chat_completion.usage.prompt_tokens,
             chat_completion.usage.completion_tokens,
         )
+        # Handle content filtering metadata
+        if hasattr(chat_completion, 'content_filter_results'):
+            content_filter_results = chat_completion.content_filter_results
+            # Process content filter results as needed
 
     @override
     @contextmanager
@@ -381,10 +389,6 @@ def process_chunk(self, chunk, call_args: SpiceCallArgs):
             input_tokens = chunk.message.usage.input_tokens
         elif chunk.type == "message_delta":
             output_tokens = chunk.usage.output_tokens
-        # Handle content filtering metadata
-        if hasattr(chunk, 'content_filter_results'):
-            content_filter_results = chunk.content_filter_results
-            # Process content filter results as needed
         return content, input_tokens, output_tokens
 
     @override
@@ -393,10 +397,6 @@ def extract_text_and_tokens(self, chat_completion, call_args: SpiceCallArgs):
         content = ("{" if add_brace else "") + chat_completion.content[0].text
         input_tokens = chat_completion.usage.input_tokens
         output_tokens = chat_completion.usage.output_tokens
-        # Handle content filtering metadata
-        if hasattr(chat_completion, 'content_filter_results'):
-            content_filter_results = chat_completion.content_filter_results
-            # Process content filter results as needed
         return content, input_tokens, output_tokens
 
     @override
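For reference, the metadata being checked for here is Azure OpenAI's `content_filter_results` mapping, which reports per-category results (for example `hate`, `sexual`, `self_harm`, and `violence`, each with `filtered` and `severity` fields). Below is a minimal, hypothetical sketch of reading that metadata from a response or streamed chunk; the helper name and the exact attribute locations are assumptions for illustration and are not part of spice/wrapped_clients.py.

# Hypothetical helper for illustration only; not part of spice/wrapped_clients.py.
from typing import Any, Dict, Optional


def get_content_filter_results(response: Any) -> Optional[Dict[str, Any]]:
    """Return Azure `content_filter_results` from a response object, if present.

    Assumes the metadata may be surfaced on the object itself or on its first
    choice, depending on how the SDK exposes the Azure-specific fields.
    """
    results = getattr(response, "content_filter_results", None)
    if results:
        return results
    choices = getattr(response, "choices", None) or []
    if choices:
        return getattr(choices[0], "content_filter_results", None)
    return None


# Example of the per-category shape Azure reports:
# {"hate": {"filtered": False, "severity": "safe"},
#  "violence": {"filtered": True, "severity": "medium"}}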
