diff --git a/OpenAI.Playground/Program.cs b/OpenAI.Playground/Program.cs
index 914c1673..b705009c 100644
--- a/OpenAI.Playground/Program.cs
+++ b/OpenAI.Playground/Program.cs
@@ -4,6 +4,7 @@
 using OpenAI.Extensions;
 using OpenAI.Interfaces;
 using OpenAI.Playground.TestHelpers;
+using OpenAI.Playground.TestHelpers.AssistantHelpers;
 
 var builder = new ConfigurationBuilder().AddJsonFile("ApiSettings.json")
     .AddUserSecrets<Program>();
diff --git a/OpenAI.Playground/TestHelpers/AssistantHelpers/RunTestHelper.cs b/OpenAI.Playground/TestHelpers/AssistantHelpers/RunTestHelper.cs
index 36952945..8be27539 100644
--- a/OpenAI.Playground/TestHelpers/AssistantHelpers/RunTestHelper.cs
+++ b/OpenAI.Playground/TestHelpers/AssistantHelpers/RunTestHelper.cs
@@ -22,6 +22,7 @@ public static async Task RunTests(IOpenAIService openAI)
         await RunCancelTests(openAI);
         await RunToolTests(openAI);
         await RunThreadAndRunTests(openAI);
+        await RunStreamTests(openAI);
         await Cleanup(openAI);
     }
 
@@ -56,11 +57,28 @@ public static async Task RunToolTests(IOpenAIService openAI)
         await Cleanup(openAI);
     }
 
+    public static async Task RunStreamTests(IOpenAIService openAI)
+    {
+        ConsoleExtensions.WriteLine("Run Stream Testing is starting:", ConsoleColor.Blue);
+        await CreateRunAsStreamTest(openAI);
+        await Cleanup(openAI);
+        await CreateThreadAndRunAsStream(openAI);
+        await Cleanup(openAI);
+        await CreateToolRunTest(openAI);
+        await ListRunsTest(openAI);
+        await RetrieveRunTest(openAI);
+        await ModifyRunTest(openAI);
+        await WaitUntil(openAI, "requires_action");
+        await SubmitToolOutputsAsStreamToRunTest(openAI);
+        await Cleanup(openAI);
+    }
+
     public static async Task RunThreadAndRunTests(IOpenAIService openAI)
     {
         ConsoleExtensions.WriteLine("Run Thread and Run Testing is starting:", ConsoleColor.Blue);
         await CreateThreadAndRun(openAI);
     }
+
 
     public static async Task CreateRunTest(IOpenAIService openAI)
     {
@@ -111,6 +129,65 @@ public static async Task CreateRunTest(IOpenAIService openAI)
         }
     }
 
+    public static async Task CreateRunAsStreamTest(IOpenAIService openAI)
+    {
+        ConsoleExtensions.WriteLine("Run Create As Stream Testing is starting:", ConsoleColor.Cyan);
+        var assistantResult = await openAI.Beta.Assistants.AssistantCreate(new()
+        {
+            Instructions = "You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+            Name = "Math Tutor",
+            Tools = [ToolDefinition.DefineCodeInterpreter()],
+            Model = Models.Gpt_4_turbo
+        });
+        if (assistantResult.Successful)
+        {
+            CreatedAssistantId = assistantResult.Id;
+            ConsoleExtensions.WriteLine($"Assistant Created Successfully with ID: {assistantResult.Id}", ConsoleColor.Green);
+        }
+        else
+        {
+            ConsoleExtensions.WriteError(assistantResult.Error);
+            return;
+        }
+
+        var threadResult = await openAI.Beta.Threads.ThreadCreate();
+        if (threadResult.Successful)
+        {
+            CreatedThreadId = threadResult.Id;
+            ConsoleExtensions.WriteLine($"Thread Created Successfully with ID: {threadResult.Id}", ConsoleColor.Green);
+        }
+        else
+        {
+            ConsoleExtensions.WriteError(threadResult.Error);
+            return;
+        }
+
+        var result = openAI.Beta.Runs.RunCreateAsStream(CreatedThreadId, new()
+        {
+            AssistantId = assistantResult.Id
+        });
+
+        await foreach (var run in result)
+        {
+            if (run.Successful)
+            {
+                if (string.IsNullOrEmpty(run.Status))
+                {
+                    Console.Write(".");
+                }
+                else
+                {
+                    ConsoleExtensions.WriteLine($"Run Id: {run.Id}, Status: {run.Status}");
+                }
+            }
+            else
+            {
+                ConsoleExtensions.WriteError(run.Error);
+            }
+        }
+    }
+
     public static async Task CreateToolRunTest(IOpenAIService openAI)
     {
         ConsoleExtensions.WriteLine("Run Create Tool Testing is starting:", ConsoleColor.Cyan);
@@ -343,6 +420,55 @@ public static async Task SubmitToolOutputsToRunTest(IOpenAIService openAI)
         }
     }
 
+    public static async Task SubmitToolOutputsAsStreamToRunTest(IOpenAIService openAI)
+    {
+        ConsoleExtensions.WriteLine("Submit Tool Outputs To Run Testing is starting:", ConsoleColor.Cyan);
+        if (string.IsNullOrWhiteSpace(CreatedRunId))
+        {
+            ConsoleExtensions.WriteLine("Run Id is not found. Please create a run first.", ConsoleColor.Red);
+            return;
+        }
+
+        if (string.IsNullOrWhiteSpace(CreatedThreadId))
+        {
+            ConsoleExtensions.WriteLine("Thread Id is not found. Please create a thread first.", ConsoleColor.Red);
+            return;
+        }
+
+        var retrieveResult = await openAI.Beta.Runs.RunRetrieve(CreatedThreadId, CreatedRunId);
+        var result = openAI.Beta.Runs.RunSubmitToolOutputsAsStream(CreatedThreadId, CreatedRunId, new()
+        {
+            ToolOutputs =
+            [
+                new()
+                {
+                    ToolCallId = retrieveResult.RequiredAction!.SubmitToolOutputs.ToolCalls.First().Id,
+                    Output = "70 degrees and sunny."
+                }
+            ]
+        });
+
+        await foreach (var run in result)
+        {
+            if (run.Successful)
+            {
+                if (string.IsNullOrEmpty(run.Status))
+                {
+                    Console.Write(".");
+                }
+                else
+                {
+                    ConsoleExtensions.WriteLine($"Run Id: {run.Id}, Status: {run.Status}");
+                }
+            }
+            else
+            {
+                ConsoleExtensions.WriteError(run.Error);
+            }
+        }
+    }
+
     public static async Task CancelRunTest(IOpenAIService openAI)
     {
         ConsoleExtensions.WriteLine("Run Cancel Testing is starting:", ConsoleColor.Cyan);
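The playground helpers above always iterate the stream to completion; a caller can also stop early, since the new enumerators take a `CancellationToken`. A minimal sketch (assuming a configured `IOpenAIService` named `openAI`, an existing `threadId`/`assistantId`, and an illustrative timeout):

```csharp
using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30));

var stream = openAI.Beta.Runs.RunCreateAsStream(threadId, new RunCreateRequest
{
    AssistantId = assistantId
});

try
{
    // WithCancellation flows the token into the iterator's [EnumeratorCancellation] parameter.
    await foreach (var run in stream.WithCancellation(cts.Token))
    {
        Console.WriteLine($"{run.Id}: {run.Status}");
    }
}
catch (OperationCanceledException)
{
    // Disposing the enumerator only stops reading the HTTP stream; the run keeps executing
    // server-side unless it is cancelled explicitly via the run-cancel endpoint.
    Console.WriteLine("Stopped listening to the stream.");
}
```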
@@ -488,6 +614,56 @@ public static async Task CreateThreadAndRun(IOpenAIService sdk)
         }
     }
 
+    public static async Task CreateThreadAndRunAsStream(IOpenAIService sdk)
+    {
+        ConsoleExtensions.WriteLine("Create Thread and Run As Stream Testing is starting:", ConsoleColor.Cyan);
+        var assistantResult = await sdk.Beta.Assistants.AssistantCreate(new()
+        {
+            Instructions = "You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+            Name = "Math Tutor",
+            Tools = [ToolDefinition.DefineCodeInterpreter()],
+            Model = Models.Gpt_4_turbo
+        });
+        CreatedAssistantId = assistantResult.Id;
+        var runResult = sdk.Beta.Runs.CreateThreadAndRunAsStream(new()
+        {
+            AssistantId = assistantResult.Id,
+            Thread = new()
+            {
+                Messages =
+                [
+                    new()
+                    {
+                        Role = StaticValues.AssistantsStatics.MessageStatics.Roles.User,
+                        Content = new("Explain deep learning to a 5 year old.")
+                    }
+                ]
+            }
+        });
+
+        await foreach (var run in runResult)
+        {
+            if (run.Successful)
+            {
+                if (string.IsNullOrEmpty(run.Status))
+                {
+                    Console.Write(".");
+                }
+                else
+                {
+                    ConsoleExtensions.WriteLine($"Run Id: {run.Id}, Status: {run.Status}");
+                }
+            }
+            else
+            {
+                ConsoleExtensions.WriteError(run.Error);
+            }
+        }
+
+        ConsoleExtensions.WriteLine("Create Thread and Run As Stream Test is successful.", ConsoleColor.Green);
+    }
+
     public static async Task Cleanup(IOpenAIService sdk)
     {
         ConsoleExtensions.WriteLine("Cleanup Testing is starting:", ConsoleColor.Cyan);
@@ -496,6 +672,7 @@ public static async Task Cleanup(IOpenAIService sdk)
         var threadResult = await sdk.Beta.Threads.ThreadDelete(CreatedThreadId);
         if (threadResult.Successful)
         {
+            CreatedThreadId = null;
             ConsoleExtensions.WriteLine("Thread Deleted Successfully.", ConsoleColor.Green);
         }
         else
@@ -509,6 +686,7 @@ public static async Task Cleanup(IOpenAIService sdk)
         var assistantResult = await sdk.Beta.Assistants.AssistantDelete(CreatedAssistantId);
         if (assistantResult.Successful)
         {
+            CreatedAssistantId = null;
             ConsoleExtensions.WriteLine("Assistant Deleted Successfully.", ConsoleColor.Green);
         }
         else
diff --git a/OpenAI.SDK/Extensions/StreamHandleExtension.cs b/OpenAI.SDK/Extensions/StreamHandleExtension.cs
new file mode 100644
index 00000000..bc6a9c30
--- /dev/null
+++ b/OpenAI.SDK/Extensions/StreamHandleExtension.cs
@@ -0,0 +1,195 @@
+using System.Runtime.CompilerServices;
+using System.Text.Json;
+using OpenAI.ObjectModels;
+using OpenAI.ObjectModels.RequestModels;
+using OpenAI.ObjectModels.ResponseModels;
+
+namespace OpenAI.Extensions;
+
+public static class StreamHandleExtension
+{
+    public static async IAsyncEnumerable<TResponse> AsStream<TResponse>(this HttpResponseMessage response, bool justDataMode = true, [EnumeratorCancellation] CancellationToken cancellationToken = default) where TResponse : BaseResponse, new()
+    {
+        // Helper data in case we need to reassemble a multi-packet response
+        ReassemblyContext ctx = new();
+
+        // Ensure that we parse headers only once to improve performance a little bit.
+        var httpStatusCode = response.StatusCode;
+        var headerValues = response.ParseHeaders();
+
+        await using var stream = await response.Content.ReadAsStreamAsync(cancellationToken);
+        using var reader = new StreamReader(stream);
+
+        // Continuously read the stream until the end of it
+        while (true)
+        {
+            cancellationToken.ThrowIfCancellationRequested();
+
+            var line = await reader.ReadLineAsync();
+            // Break the loop if we have reached the end of the stream
+            if (line == null)
+            {
+                break;
+            }
+
+            // Skip empty lines
+            if (string.IsNullOrEmpty(line))
+            {
+                continue;
+            }
+
+            if (justDataMode && !line.StartsWith("data: "))
+            {
+                continue;
+            }
+
+            line = line.RemoveIfStartWith("data: ");
+
+            // Exit the loop if the stream is done
+            if (line.StartsWith("[DONE]"))
+            {
+                break;
+            }
+
+            TResponse? block;
+            try
+            {
+                // When the response is good, each line is a serializable TResponse block
+                block = JsonSerializer.Deserialize<TResponse>(line);
+            }
+            catch (Exception)
+            {
+                // When the API returns an error, it does not come back as a block, it returns a single character of text ("{").
+                // In this instance, read through the rest of the response, which should be a complete object to parse.
+                line += await reader.ReadToEndAsync();
+                block = JsonSerializer.Deserialize<TResponse>(line);
+            }
+
+            if (null != block)
+            {
+                if (typeof(TResponse) == typeof(ChatCompletionCreateResponse))
+                {
+                    ctx.Process(block as ChatCompletionCreateResponse ?? throw new InvalidOperationException());
+                }
+
+                if (!ctx.IsFnAssemblyActive)
+                {
+                    block.HttpStatusCode = httpStatusCode;
+                    block.HeaderValues = headerValues;
+                    yield return block;
+                }
+            }
+        }
+    }
+
+    private class ReassemblyContext
+    {
+        private IList<ToolCall> _deltaFnCallList = new List<ToolCall>();
+        public bool IsFnAssemblyActive => _deltaFnCallList.Count > 0;
+
+        /// <summary>
+        ///     Detects if a response block is a part of a multi-chunk
+        ///     streamed tool call response of type == "function". As long as that's true,
+        ///     it keeps accumulating block contents even handling multiple parallel tool calls, and once all the function call
+        ///     streaming is done, it produces the assembled results in the final block.
+        /// </summary>
+        /// <param name="block"></param>
+        public void Process(ChatCompletionCreateResponse block)
+        {
+            var firstChoice = block.Choices?.FirstOrDefault();
+            if (firstChoice == null)
+            {
+                return;
+            } // not a valid state? nothing to do
+
+            var isStreamingFnCall = IsStreamingFunctionCall();
+            var isStreamingFnCallEnd = firstChoice.FinishReason != null;
+
+            var justStarted = false;
+
+            // Check if the streaming block has a tool_call segment of "function" type, according to the value returned by IsStreamingFunctionCall() above.
+            // If so, this is the beginning entry point of a function call assembly for each tool_call main item, even in case of multiple parallel tool calls.
+            // We're going to steal the partial message and squirrel it away for the time being.
+            if (isStreamingFnCall)
+            {
+                foreach (var t in firstChoice.Message.ToolCalls!)
+                {
+                    // Handles just ToolCall type == "function", according to the value returned by IsStreamingFunctionCall() above
+                    if (t.FunctionCall != null && t.Type == StaticValues.CompletionStatics.ToolType.Function)
+                        _deltaFnCallList.Add(t);
+                }
+
+                justStarted = true;
+            }
+
+            // As long as we're assembling, keep on appending those args,
+            // respecting the stream arguments sequence aligned with the last tool call main item which the arguments belong to.
+            if (IsFnAssemblyActive && !justStarted)
+            {
+                // Get the current tool call metadata so we can find, by index, which item the arguments belong to.
+                var tcMetadata = GetToolCallMetadata();
+
+                if (tcMetadata.index > -1)
+                {
+                    // Handles just ToolCall type == "function"
+                    using var argumentsList = ExtractArgsSoFar().GetEnumerator();
+                    var existItems = argumentsList.MoveNext();
+
+                    if (existItems)
+                    {
+                        // The tool call item must exist, as it was added in previous steps; otherwise First() will raise an InvalidOperationException
+                        var tc = _deltaFnCallList!.First(t => t.Index == tcMetadata.index);
+                        tc.FunctionCall!.Arguments += argumentsList.Current;
+                        argumentsList.MoveNext();
+                    }
+                }
+            }
+
+            // If we were assembling and it just finished, fill this block with the info we've assembled, and we're done.
+            if (IsFnAssemblyActive && isStreamingFnCallEnd)
+            {
+                firstChoice.Message ??= ChatMessage.FromAssistant(""); // just in case? not sure it's needed
+                // TODO When more than one function call is in a single index, OpenAI only returns the role delta at the beginning, which causes an issue.
+                // TODO The current solution addresses this problem, but we need to fix it by using the role of the index.
+                firstChoice.Message.Role ??= "assistant";
+                firstChoice.Message.ToolCalls = new List<ToolCall>(_deltaFnCallList);
+                _deltaFnCallList.Clear();
+            }
+
+            // Returns true if we're actively streaming and also have a partial tool call main item (id != (null | "")) of type "function" in the response
+            bool IsStreamingFunctionCall()
+            {
+                return firstChoice.FinishReason == null && // actively streaming, and
+                       firstChoice.Message?.ToolCalls?.Count > 0 && (firstChoice.Message?.ToolCalls.Any(t => t.FunctionCall != null && !string.IsNullOrEmpty(t.Id) && t.Type == StaticValues.CompletionStatics.ToolType.Function) ?? false);
+            }
+
+            (int index, string? id, string? type) GetToolCallMetadata()
+            {
+                var tc = block.Choices?.FirstOrDefault()?.Message?.ToolCalls?.Where(t => t.FunctionCall != null).Select(t => t).FirstOrDefault();
+
+                return tc switch
+                {
+                    not null => (tc.Index, tc.Id, tc.Type),
+                    _ => (-1, default, default)
+                };
+            }
+
+            IEnumerable<string> ExtractArgsSoFar()
+            {
+                var toolCalls = block.Choices?.FirstOrDefault()?.Message?.ToolCalls;
+
+                if (toolCalls != null)
+                {
+                    var functionCallList = toolCalls.Where(t => t.FunctionCall != null).Select(t => t.FunctionCall);
+
+                    foreach (var functionCall in functionCallList)
+                    {
+                        yield return functionCall!.Arguments ?? "";
+                    }
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
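`AsStream<TResponse>` is a plain SSE line parser: with `justDataMode` enabled it keeps only `data:` payloads and stops at the `[DONE]` sentinel. For orientation, the wire format it consumes looks roughly like the sketch below (illustrative, based on OpenAI's published streaming format, not captured output; the Assistants endpoints interleave `event:` lines, which `justDataMode` skips):

```text
event: thread.run.created
data: {"id":"run_...","object":"thread.run","status":"queued", ...}

event: thread.run.completed
data: {"id":"run_...","object":"thread.run","status":"completed", ...}

data: [DONE]
```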
diff --git a/OpenAI.SDK/Interfaces/IRunService.cs b/OpenAI.SDK/Interfaces/IRunService.cs
index 19df623e..fa6dfc16 100644
--- a/OpenAI.SDK/Interfaces/IRunService.cs
+++ b/OpenAI.SDK/Interfaces/IRunService.cs
@@ -1,4 +1,5 @@
-using OpenAI.ObjectModels.RequestModels;
+using System.Runtime.CompilerServices;
+using OpenAI.ObjectModels.RequestModels;
 using OpenAI.ObjectModels.SharedModels;
 
 namespace OpenAI.Interfaces;
@@ -15,6 +16,17 @@ public interface IRunService
     /// <returns></returns>
     Task<RunResponse> RunCreate(string threadId, RunCreateRequest request, string? modelId = null, CancellationToken cancellationToken = default);
 
+    /// <summary>
+    ///     Create a run and stream the results.
+    /// </summary>
+    /// <param name="threadId"></param>
+    /// <param name="request"></param>
+    /// <param name="modelId"></param>
+    /// <param name="justDataMode"></param>
+    /// <param name="cancellationToken"></param>
+    /// <returns></returns>
+    IAsyncEnumerable<RunResponse> RunCreateAsStream(string threadId, RunCreateRequest request, string? modelId = null, bool justDataMode = true, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
     /// <summary>
     ///     Retrieves a run.
     /// </summary>
@@ -47,22 +59,40 @@ public interface IRunService
     /// <param name="cancellationToken"></param>
     /// <returns></returns>
     Task<RunResponse> RunSubmitToolOutputs(string threadId, string runId, SubmitToolOutputsToRunRequest request, CancellationToken cancellationToken = default);
-
+
     /// <summary>
-    ///     Modifies a run.
+    ///     Submit tool outputs to run
+    ///     <remarks>
+    ///         When a run has the status: "requires_action" and required_action.type is submit_tool_outputs,
+    ///         this endpoint can be used to submit the outputs from the tool calls once they're all completed.
+    ///         All outputs must be submitted in a single request.
+    ///     </remarks>
     /// </summary>
-    /// <param name="threadId">The ID of the [thread](/docs/api-reference/threads) that was run.</param>
-    /// <param name="runId">The ID of the run to modify.</param>
+    /// <param name="threadId"></param>
+    /// <param name="runId"></param>
     /// <param name="request"></param>
     /// <param name="cancellationToken"></param>
     /// <returns></returns>
-    Task<RunResponse> RunModify(string threadId, string runId, RunModifyRequest request, CancellationToken cancellationToken = default);
+    IAsyncEnumerable<RunResponse> RunSubmitToolOutputsAsStream(string threadId, string runId, SubmitToolOutputsToRunRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
+    /// <summary>
+    ///     Modifies a run.
+    /// </summary>
+    /// <param name="threadId">The ID of the [thread](/docs/api-reference/threads) that was run.</param>
+    /// <param name="runId">The ID of the run to modify.</param>
+    /// <param name="request"></param>
+    /// <param name="cancellationToken"></param>
+    /// <returns></returns>
+    Task<RunResponse> RunModify(string threadId, string runId, RunModifyRequest request, CancellationToken cancellationToken = default);
 
     /// <summary>
     ///     Create a thread and run it in one request.
     /// </summary>
     Task<RunResponse> CreateThreadAndRun(CreateThreadAndRunRequest requestBody, CancellationToken cancellationToken = default);
-
+
+    /// <summary>
+    ///     Create a thread and run it in one request as Stream.
+    /// </summary>
+    IAsyncEnumerable<RunResponse> CreateThreadAndRunAsStream(CreateThreadAndRunRequest createThreadAndRunRequest, string? modelId = null, bool justDataMode = true, [EnumeratorCancellation] CancellationToken cancellationToken = default);
+
     /// <summary>
     ///     Returns a list of runs belonging to a thread.
     /// </summary>
diff --git a/OpenAI.SDK/Managers/OpenAIChatCompletions.cs b/OpenAI.SDK/Managers/OpenAIChatCompletions.cs
index 5e2d4206..cb3e712f 100644
--- a/OpenAI.SDK/Managers/OpenAIChatCompletions.cs
+++ b/OpenAI.SDK/Managers/OpenAIChatCompletions.cs
@@ -21,9 +21,6 @@ public async Task<ChatCompletionCreateResponse> CreateCompletion(ChatCompletionC
 
     public async IAsyncEnumerable<ChatCompletionCreateResponse> CreateCompletionAsStream(ChatCompletionCreateRequest chatCompletionCreateRequest, string? modelId = null, bool justDataMode = true, [EnumeratorCancellation] CancellationToken cancellationToken = default)
     {
-        // Helper data in case we need to reassemble a multi-packet response
-        ReassemblyContext ctx = new();
-
         // Mark the request as streaming
         chatCompletionCreateRequest.Stream = true;
 
@@ -38,196 +35,9 @@ public async IAsyncEnumerable<ChatCompletionCreateResponse> CreateCompletionAsSt
             yield break;
         }
 
-        // Ensure that we parse headers only once to improve performance a little bit.
-        var httpStatusCode = response.StatusCode;
-        var headerValues = response.ParseHeaders();
-
-        await using var stream = await response.Content.ReadAsStreamAsync(cancellationToken);
-        using var reader = new StreamReader(stream);
-
-        // Continuously read the stream until the end of it
-        while (true)
-        {
-            cancellationToken.ThrowIfCancellationRequested();
-
-            var line = await reader.ReadLineAsync();
-            // Break the loop if we have reached the end of the stream
-            if (line == null)
-            {
-                break;
-            }
-
-            // Skip empty lines
-            if (string.IsNullOrEmpty(line))
-            {
-                continue;
-            }
-
-            if (justDataMode && !line.StartsWith("data: "))
-            {
-                continue;
-            }
-
-            line = line.RemoveIfStartWith("data: ");
-
-            // Exit the loop if the stream is done
-            if (line.StartsWith("[DONE]"))
-            {
-                break;
-            }
-
-            ChatCompletionCreateResponse? block;
-            try
-            {
-                // When the response is good, each line is a serializable CompletionCreateRequest
-                block = JsonSerializer.Deserialize<ChatCompletionCreateResponse>(line);
-            }
-            catch (Exception)
-            {
-                // When the API returns an error, it does not come back as a block, it returns a single character of text ("{").
-                // In this instance, read through the rest of the response, which should be a complete object to parse.
-                line += await reader.ReadToEndAsync();
-                block = JsonSerializer.Deserialize<ChatCompletionCreateResponse>(line);
-            }
-
-
-            if (null != block)
-            {
-                ctx.Process(block);
-
-                if (!ctx.IsFnAssemblyActive)
-                {
-                    block.HttpStatusCode = httpStatusCode;
-                    block.HeaderValues = headerValues;
-                    yield return block;
-                }
-            }
-        }
+        await foreach (var baseResponse in response.AsStream<ChatCompletionCreateResponse>(cancellationToken: cancellationToken)) yield return baseResponse;
+
     }
 
-    /// <summary>
-    ///     This helper class attempts to reassemble a tool call with type == "function" response
-    ///     that was split up across several streamed chunks.
-    ///     Note that this only works for the first message in each response,
-    ///     and ignores the others; if OpenAI ever changes their response format
-    ///     this will need to be adjusted.
-    /// </summary>
-    private class ReassemblyContext
-    {
-        private IList<ToolCall> _deltaFnCallList = new List<ToolCall>();
-        public bool IsFnAssemblyActive => _deltaFnCallList.Count > 0;
-
-
-        /// <summary>
-        ///     Detects if a response block is a part of a multi-chunk
-        ///     streamed tool call response of type == "function". As long as that's true,
-        ///     it keeps accumulating block contents even handling multiple parallel tool calls, and once all the function call
-        ///     streaming is done, it produces the assembled results in the final block.
-        /// </summary>
-        /// <param name="block"></param>
-        public void Process(ChatCompletionCreateResponse block)
-        {
-            var firstChoice = block.Choices?.FirstOrDefault();
-            if (firstChoice == null)
-            {
-                return;
-            } // not a valid state? nothing to do
-
-            var isStreamingFnCall = IsStreamingFunctionCall();
-            var isStreamingFnCallEnd = firstChoice.FinishReason != null;
-
-            var justStarted = false;
-
-            // Check if the streaming block has a tool_call segment of "function" type, according to the value returned by IsStreamingFunctionCall() above.
-            // If so, this is the beginning entry point of a function call assembly for each tool_call main item, even in case of multiple parallel tool calls.
-            // We're going to steal the partial message and squirrel it away for the time being.
-            if (isStreamingFnCall)
-            {
-                foreach (var t in firstChoice.Message.ToolCalls!)
-                {
-                    //Handles just ToolCall type == "function" as according to the value returned by IsStreamingFunctionCall() above
-                    if (t.FunctionCall != null && t.Type == StaticValues.CompletionStatics.ToolType.Function)
-                        _deltaFnCallList.Add(t);
-                }
-
-                justStarted = true;
-            }
-
-            // As long as we're assembling, keep on appending those args,
-            // respecting the stream arguments sequence aligned with the last tool call main item which the arguments belong to.
-            if (IsFnAssemblyActive && !justStarted)
-            {
-                //Get current toolcall metadata in order to search by index reference which to bind arguments to.
-                var tcMetadata = GetToolCallMetadata();
-
-                if (tcMetadata.index > -1)
-                {
-                    //Handles just ToolCall type == "function"
-                    using var argumentsList = ExtractArgsSoFar()
-                        .GetEnumerator();
-                    var existItems = argumentsList.MoveNext();
-
-                    if (existItems)
-                    {
-                        //toolcall item must exists as added in previous steps, otherwise First() will raise an InvalidOperationException
-                        var tc = _deltaFnCallList!.First(t => t.Index == tcMetadata.index);
-                        tc.FunctionCall!.Arguments += argumentsList.Current;
-                        argumentsList.MoveNext();
-                    }
-                }
-            }
-
-            // If we were assembling and it just finished, fill this block with the info we've assembled, and we're done.
-            if (IsFnAssemblyActive && isStreamingFnCallEnd)
-            {
-                firstChoice.Message ??= ChatMessage.FromAssistant(""); // just in case? not sure it's needed
-                // TODO When more than one function call is in a single index, OpenAI only returns the role delta at the beginning, which causes an issue.
-                // TODO The current solution addresses this problem, but we need to fix it by using the role of the index.
-                firstChoice.Message.Role ??= "assistant";
-                firstChoice.Message.ToolCalls = new List<ToolCall>(_deltaFnCallList);
-                _deltaFnCallList.Clear();
-            }
-
-            // Returns true if we're actively streaming, and also have a partial tool call main item ( id != (null | "")) of type "function" in the response
-            bool IsStreamingFunctionCall()
-            {
-                return firstChoice.FinishReason == null && // actively streaming, is a tool call main item, and have a function call
-                       firstChoice.Message?.ToolCalls?.Count > 0 && (firstChoice.Message?.ToolCalls.Any(t => t.FunctionCall != null && !string.IsNullOrEmpty(t.Id) && t.Type == StaticValues.CompletionStatics.ToolType.Function) ?? false);
-            }
-
-            (int index, string? id, string? type) GetToolCallMetadata()
-            {
-                var tc = block.Choices
-                    ?.FirstOrDefault()
-                    ?.Message
-                    ?.ToolCalls
-                    ?.Where(t => t.FunctionCall != null)
-                    .Select(t => t)
-                    .FirstOrDefault();
-
-                return tc switch
-                {
-                    not null => (tc.Index, tc.Id, tc.Type),
-                    _ => (-1, default, default)
-                };
-            }
-
-            IEnumerable<string> ExtractArgsSoFar()
-            {
-                var toolCalls = block.Choices?.FirstOrDefault()
-                    ?.Message?.ToolCalls;
-
-                if (toolCalls != null)
-                {
-                    var functionCallList = toolCalls.Where(t => t.FunctionCall != null)
-                        .Select(t => t.FunctionCall);
-
-                    foreach (var functionCall in functionCallList)
-                    {
-                        yield return functionCall!.Arguments ?? "";
-                    }
-                }
-            }
-        }
-    }
+
 }
\ No newline at end of file
diff --git a/OpenAI.SDK/Managers/OpenAIRunService.cs b/OpenAI.SDK/Managers/OpenAIRunService.cs
index 1f6e7ea0..8ce7abbb 100644
--- a/OpenAI.SDK/Managers/OpenAIRunService.cs
+++ b/OpenAI.SDK/Managers/OpenAIRunService.cs
@@ -2,6 +2,7 @@
 using OpenAI.Interfaces;
 using OpenAI.ObjectModels.RequestModels;
 using OpenAI.ObjectModels.SharedModels;
+using System.Runtime.CompilerServices;
 
 namespace OpenAI.Managers;
 
@@ -26,6 +27,37 @@ public async Task<RunResponse> RunCreate(string threadId, RunCreateRequest reque
         request.ProcessModelId(modelId, _defaultModelId, true);
         return await _httpClient.PostAndReadAsAsync<RunResponse>(_endpointProvider.RunCreate(threadId), request, cancellationToken);
     }
+
+    /// <summary>
+    ///     Create a run and stream the results.
+    /// </summary>
+    /// <param name="threadId"></param>
+    /// <param name="request"></param>
+    /// <param name="modelId"></param>
+    /// <param name="justDataMode"></param>
+    /// <param name="cancellationToken"></param>
+    /// <returns></returns>
+    public async IAsyncEnumerable<RunResponse> RunCreateAsStream(string threadId, RunCreateRequest request, string? modelId = null, bool justDataMode = true,
+        [EnumeratorCancellation] CancellationToken cancellationToken = default)
+    {
+        // Mark the request as streaming
+        request.Stream = true;
+
+        // Resolve the model and send the request to the RunCreate endpoint
+        request.ProcessModelId(modelId, _defaultModelId, true);
+
+        using var response = _httpClient.PostAsStreamAsync(_endpointProvider.RunCreate(threadId), request, cancellationToken);
+
+        if (!response.IsSuccessStatusCode)
+        {
+            yield return await response.HandleResponseContent<RunResponse>(cancellationToken);
+            yield break;
+        }
+
+        await foreach (var baseResponse in response.AsStream<RunResponse>(cancellationToken: cancellationToken)) yield return baseResponse;
+    }
 
     /// <inheritdoc />
     public async Task<RunResponse> RunModify(string threadId, string runId, RunModifyRequest request, CancellationToken cancellationToken = default)
@@ -111,11 +143,49 @@ public async Task<RunResponse> RunSubmitToolOutputs(string threadId, string runI
         return await _httpClient.PostAndReadAsAsync<RunResponse>(_endpointProvider.RunSubmitToolOutputs(threadId, runId), request, cancellationToken);
     }
 
+    public async IAsyncEnumerable<RunResponse> RunSubmitToolOutputsAsStream(string threadId, string runId, SubmitToolOutputsToRunRequest request, [EnumeratorCancellation] CancellationToken cancellationToken = default)
+    {
+        // Mark the request as streaming
+        request.Stream = true;
+
+        // Send the request to the RunSubmitToolOutputs endpoint
+        using var response = _httpClient.PostAsStreamAsync(_endpointProvider.RunSubmitToolOutputs(threadId, runId), request, cancellationToken);
+
+        if (!response.IsSuccessStatusCode)
+        {
+            yield return await response.HandleResponseContent<RunResponse>(cancellationToken);
+            yield break;
+        }
+
+        await foreach (var baseResponse in response.AsStream<RunResponse>(cancellationToken: cancellationToken)) yield return baseResponse;
+    }
+
     /// <inheritdoc />
     public async Task<RunResponse> CreateThreadAndRun(CreateThreadAndRunRequest requestBody, CancellationToken cancellationToken = default)
     {
         return await _httpClient.PostAndReadAsAsync<RunResponse>(_endpointProvider.ThreadAndRunCreate(), requestBody, cancellationToken);
     }
+
+    public async IAsyncEnumerable<RunResponse> CreateThreadAndRunAsStream(CreateThreadAndRunRequest createThreadAndRunRequest, string? modelId = null, bool justDataMode = true,
+        [EnumeratorCancellation] CancellationToken cancellationToken = default)
+    {
+        // Mark the request as streaming
+        createThreadAndRunRequest.Stream = true;
+
+        // Resolve the model and send the request to the ThreadAndRunCreate endpoint
+        createThreadAndRunRequest.ProcessModelId(modelId, _defaultModelId, allowNull: true);
+
+        using var response = _httpClient.PostAsStreamAsync(_endpointProvider.ThreadAndRunCreate(), createThreadAndRunRequest, cancellationToken);
+
+        if (!response.IsSuccessStatusCode)
+        {
+            yield return await response.HandleResponseContent<RunResponse>(cancellationToken);
+            yield break;
+        }
+
+        await foreach (var baseResponse in response.AsStream<RunResponse>(cancellationToken: cancellationToken)) yield return baseResponse;
+    }
 
     /// <inheritdoc />
     public async Task<RunListResponse> ListRuns(string threadId, PaginationRequest runListRequest, CancellationToken cancellationToken = default)
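All three streaming implementations follow the same shape: flag the request as streaming, POST it, surface a single error block on a non-success status, then hand the body to `AsStream<RunResponse>`. If more streaming endpoints arrive later, that shape could be factored into a small private helper — a hypothetical sketch, not part of this PR (`SendAsStreamAsync` is an invented name):

```csharp
private async IAsyncEnumerable<RunResponse> SendAsStreamAsync<TRequest>(string endpoint, TRequest request,
    [EnumeratorCancellation] CancellationToken cancellationToken = default)
{
    using var response = _httpClient.PostAsStreamAsync(endpoint, request, cancellationToken);

    if (!response.IsSuccessStatusCode)
    {
        // Non-2xx responses carry a single error object rather than an event stream.
        yield return await response.HandleResponseContent<RunResponse>(cancellationToken);
        yield break;
    }

    await foreach (var block in response.AsStream<RunResponse>(cancellationToken: cancellationToken))
    {
        yield return block;
    }
}
```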
diff --git a/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs b/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
index 0a9c25b6..2d3f12b1 100644
--- a/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
+++ b/OpenAI.SDK/ObjectModels/RequestModels/AssistantModifyRequest.cs
@@ -3,7 +3,7 @@
 
 namespace OpenAI.ObjectModels.RequestModels;
 
-public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileIds, IOpenAiModels.IMetaData
+public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IMetaData, IOpenAiModels.ITemperature
 {
     /// <summary>
     ///     The name of the assistant. The maximum length is 256
@@ -30,10 +30,12 @@ public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileI
     public List<ToolDefinition>? Tools { get; set; }
 
     /// <summary>
-    ///     A list of File IDs attached to this assistant.
+    ///     A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For
+    ///     example, the code_interpreter tool requires a list of file IDs, while the file_search tool requires a list of
+    ///     vector store IDs.
     /// </summary>
-    [JsonPropertyName("file_ids")]
-    public List<string>? FileIds { get; set; }
+    [JsonPropertyName("tool_resources")]
+    public ToolResources? ToolResources { get; set; }
 
     /// <summary>
     ///     Set of 16 key-value pairs that can be attached to an object.
@@ -46,4 +48,37 @@ public class AssistantModifyRequest : IOpenAiModels.IModel, IOpenAiModels.IFileI
     /// </summary>
     [JsonPropertyName("model")]
     public string Model { get; set; }
+
+    /// <summary>
+    ///     What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
+    ///     lower values like 0.2 will make it more focused and deterministic.
+    /// </summary>
+    [JsonPropertyName("temperature")]
+    public float? Temperature { get; set; }
+
+    /// <summary>
+    ///     An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
+    ///     tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are
+    ///     considered.
+    ///     We generally recommend altering this or temperature but not both.
+    /// </summary>
+    [JsonPropertyName("top_p")]
+    public double? TopP { get; set; }
+
+    /// <summary>
+    ///     Specifies the format that the model must output. Compatible with
+    ///     GPT-4o,
+    ///     GPT-4 Turbo, and all GPT-3.5 Turbo
+    ///     models since gpt-3.5-turbo-1106.
+    ///     Setting to { "type": "json_object" } enables JSON mode, which guarantees the message the model generates is
+    ///     valid JSON.<br />
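The new `AssistantModifyRequest` members line up with the Assistants v2 modify endpoint. A hedged usage sketch (assuming the modify call is exposed as `openAI.Beta.Assistants.AssistantModify(assistantId, request)`, mirroring the create/delete calls used in the playground, and an existing `assistantId`):

```csharp
var modifyResult = await openAI.Beta.Assistants.AssistantModify(assistantId, new AssistantModifyRequest
{
    Model = Models.Gpt_4_turbo,
    Temperature = 0.2f, // lower temperature for more deterministic answers
    TopP = 0.9
});

if (!modifyResult.Successful)
{
    Console.WriteLine(modifyResult.Error?.Message);
}
```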
+    ///     Important: when using JSON mode, you must also instruct the model to produce JSON yourself via a system or
+    ///     user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the
+    ///     token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be
+    ///     partially cut off if finish_reason="length", which indicates the generation exceeded max_tokens or the
+    ///     conversation exceeded the max context length.
+    /// </summary>
+ [JsonPropertyName("response_format")] + public ResponseFormatOneOfType? ResponseFormat { get; set; } } \ No newline at end of file diff --git a/OpenAI.SDK/ObjectModels/RequestModels/CreateThreadAndRunRequest.cs b/OpenAI.SDK/ObjectModels/RequestModels/CreateThreadAndRunRequest.cs index 6a93821a..ce920599 100644 --- a/OpenAI.SDK/ObjectModels/RequestModels/CreateThreadAndRunRequest.cs +++ b/OpenAI.SDK/ObjectModels/RequestModels/CreateThreadAndRunRequest.cs @@ -3,7 +3,7 @@ namespace OpenAI.ObjectModels.RequestModels; -public class CreateThreadAndRunRequest : IOpenAiModels.IAssistantId +public class CreateThreadAndRunRequest : IOpenAiModels.IAssistantId,IOpenAiModels.IModel { /// /// The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. diff --git a/OpenAI.SDK/ObjectModels/RequestModels/SubmitToolOutputsToRunRequest.cs b/OpenAI.SDK/ObjectModels/RequestModels/SubmitToolOutputsToRunRequest.cs index 6e5be108..1e86de06 100644 --- a/OpenAI.SDK/ObjectModels/RequestModels/SubmitToolOutputsToRunRequest.cs +++ b/OpenAI.SDK/ObjectModels/RequestModels/SubmitToolOutputsToRunRequest.cs @@ -11,6 +11,12 @@ public class SubmitToolOutputsToRunRequest [Required] [JsonPropertyName("tool_outputs")] public List ToolOutputs { get; set; } + + /// + /// If true, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a data: [DONE] message. + /// + [JsonPropertyName("stream")] + public bool? Stream { get; set; } } /// diff --git a/OpenAI.SDK/OpenAI.csproj b/OpenAI.SDK/OpenAI.csproj index 55c75dd9..4c2e1e0f 100644 --- a/OpenAI.SDK/OpenAI.csproj +++ b/OpenAI.SDK/OpenAI.csproj @@ -10,7 +10,7 @@ OpenAI-Betalgo.png true OpenAI SDK by Betalgo - 8.3.0 + 8.4.0 Tolga Kayhan, Betalgo Betalgo Up Ltd. OpenAI ChatGPT, Whisper, GPT-4 and DALL·E dotnet SDK diff --git a/Readme.md b/Readme.md index 9309f72f..5ecdecbe 100644 --- a/Readme.md +++ b/Readme.md @@ -32,7 +32,8 @@ Install-Package Betalgo.OpenAI.Utilities Maintenance of this project is made possible by all the bug reporters, [contributors](https://github.com/betalgo/openai/graphs/contributors), and [sponsors](https://github.com/sponsors/kayhantolga). 💖 Sponsors: -[@betalgo](https://github.com/betalgo), [Laser Cat Eyes](https://lasercateyes.com/), [@tylerje](https://github.com/tylerje), [@oferavnery](https://github.com/oferavnery), [@MayDay-wpf](https://github.com/MayDay-wpf), [@AnukarOP](https://github.com/AnukarOP), [@Removable](https://github.com/Removable), [@Scar11](https://github.com/Scar11) +[@betalgo](https://github.com/betalgo), [Laser Cat Eyes](https://lasercateyes.com/) +[@tylerje](https://github.com/tylerje), [@oferavnery](https://github.com/oferavnery), [@MayDay-wpf](https://github.com/MayDay-wpf), [@AnukarOP](https://github.com/AnukarOP), [@Removable](https://github.com/Removable), [@Scar11](https://github.com/Scar11) --- @@ -106,29 +107,22 @@ if (completionResult.Successful) --- ## Notes -#### Library Renaming -This library was previously known as `Betalgo.OpenAI.GPT3`, and now it has a new package ID: `Betalgo.OpenAI`. - Due to time constraints, not all methods have been thoroughly tested or fully documented. If you encounter any issues, please report them or submit a pull request. Your contributions are always appreciated. -I initially developed this SDK for my personal use and decided to share it with the community. As I have not maintained open-source projects before, any assistance or feedback would be greatly appreciated. 
-
-Please be aware that future releases may frequently include breaking changes. Consider this before deciding to use the library. I cannot accept responsibility for any damage caused by using the library. You are free to explore alternative libraries or the OpenAI Web-API if you feel this is not suitable for your purposes.
-
+Needless to say, I cannot accept responsibility for any damage caused by using the library.
 
 ## Changelog
-### 8.3.0
-- Updated Assistant tests, added sample for CreateMessageWithImage
-- Azure Assistant endpoints are updated since documentation reference still earlier version (Assistant v1). I am not sure if Azure supports all Assistant v2 features. So, feedback is much appreciated.
-- Fixed error handling and response parsing for audio transcription result in text mode.
-- Fixed Culture issue for number conversions (Audio Temperature and Image N)
-- Removed file_ids from Create Assistant
-- Added Support for Chat LogProbs
-- Fixed File_Id Typo in file VisionImageUrl
-- Updated File purpose enum list
+### 8.4.0
+- Added stream support for submitToolOutputsToRun, createRun, and createThreadAndRun
+- 🎉 With this update, we are now in sync with OpenAI's latest API changes. We shouldn't have any missing features as of now.
 
 ### [More Change Logs](https://github.com/betalgo/openai/wiki/Change-Logs)
 ---
 
 For any issues, contributions, or feedback, feel free to reach out or submit a pull request.
+
+Betalgo X: [Betalgo (@Betalgo) / X (twitter.com)](https://twitter.com/Betalgo)
+Betalgo LinkedIn: [Betalgo | LinkedIn](https://www.linkedin.com/company/betalgo-up)
+Tolga X: [Tolga Kayhan (@kayhantolga) / X (twitter.com)](https://twitter.com/kayhantolga)
+Tolga LinkedIn: [Tolga Kayhan | LinkedIn](https://www.linkedin.com/in/kayhantolga/)
\ No newline at end of file
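For quick reference, a minimal consumer of the streaming surface added in 8.4.0 — a sketch, assuming a configured `IOpenAIService` named `openAI` and an existing `assistantId`; it mirrors the playground's `CreateThreadAndRunAsStream` test:

```csharp
var runStream = openAI.Beta.Runs.CreateThreadAndRunAsStream(new CreateThreadAndRunRequest
{
    AssistantId = assistantId,
    Thread = new()
    {
        Messages =
        [
            new()
            {
                Role = StaticValues.AssistantsStatics.MessageStatics.Roles.User,
                Content = new("Say hello!")
            }
        ]
    }
});

// Run-level events carry a status; delta events arrive without one.
await foreach (var run in runStream)
{
    Console.WriteLine(run.Successful
        ? $"{run.Id}: {run.Status}"
        : run.Error?.Message);
}
```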