Merge pull request #20 from DHclly/main
Optimize code structure and fix some bugs
239573049 authored Jul 15, 2024
2 parents aa8bf31 + 96d32f0 commit 5bc0fb7
Showing 131 changed files with 3,010 additions and 2,222 deletions.
7 changes: 7 additions & 0 deletions Thor.sln
@@ -49,6 +49,8 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thor.LocalMemory.Cache", "s
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thor.RedisMemory.Cache", "src\framework\Thor.RedisMemory.Cache\Thor.RedisMemory.Cache.csproj", "{84C27CF3-BD3C-46D6-A774-967DB5D3F060}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Thor.Moonshot", "src\extensions\Thor.Moonshot\Thor.Moonshot.csproj", "{4D98C74B-D071-430C-9956-3A9B8CF642DD}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -119,6 +121,10 @@ Global
{84C27CF3-BD3C-46D6-A774-967DB5D3F060}.Debug|Any CPU.Build.0 = Debug|Any CPU
{84C27CF3-BD3C-46D6-A774-967DB5D3F060}.Release|Any CPU.ActiveCfg = Release|Any CPU
{84C27CF3-BD3C-46D6-A774-967DB5D3F060}.Release|Any CPU.Build.0 = Release|Any CPU
{4D98C74B-D071-430C-9956-3A9B8CF642DD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{4D98C74B-D071-430C-9956-3A9B8CF642DD}.Debug|Any CPU.Build.0 = Debug|Any CPU
{4D98C74B-D071-430C-9956-3A9B8CF642DD}.Release|Any CPU.ActiveCfg = Release|Any CPU
{4D98C74B-D071-430C-9956-3A9B8CF642DD}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -142,6 +148,7 @@ Global
{F78FCCB8-19E6-43E9-91EB-765257722DD5} = {1035B36B-8194-43D4-8A4A-992D962CD1D8}
{42402B28-DDC4-4294-A444-6D8B3C43B934} = {1035B36B-8194-43D4-8A4A-992D962CD1D8}
{84C27CF3-BD3C-46D6-A774-967DB5D3F060} = {1035B36B-8194-43D4-8A4A-992D962CD1D8}
{4D98C74B-D071-430C-9956-3A9B8CF642DD} = {294B7A34-48F5-43AB-A1D3-033480559E3A}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {8A80931C-B951-4EF2-AC79-457E73118E5F}
12 changes: 12 additions & 0 deletions src/Thor.Abstractions/Audios/IThorAudioService.cs
@@ -0,0 +1,12 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Thor.Abstractions.Audios;

public interface IThorAudioService
{

}
16 changes: 0 additions & 16 deletions src/Thor.Abstractions/ChatOptions.cs

This file was deleted.

51 changes: 51 additions & 0 deletions src/Thor.Abstractions/Chats/Consts/ThorChatMessageRoleConst.cs
@@ -0,0 +1,51 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Thor.Abstractions.Chats.Consts
{
/// <summary>
/// Chat message role definitions
/// </summary>
public class ThorChatMessageRoleConst
{
/// <summary>
/// System role
/// <para>
/// Assigns specific behavior or context to the chat assistant, shaping how the model behaves in the conversation.
/// For example, setting the system role to "You are a football expert"
/// makes ChatGPT adopt that particular persona or domain expertise in the dialogue.
/// </para>
/// </summary>
public static string System => "system";

/// <summary>
/// User role
/// <para>
/// Represents the actual end user who sends prompts or messages to ChatGPT;
/// it indicates that a message/prompt comes from the end user or a human.
/// </para>
/// </summary>
public static string User => "user";

/// <summary>
/// Assistant role
/// <para>
/// Represents the entity that responds to the end user's prompts and keeps the conversation coherent.
/// These messages are generated by the model and are supplied back as its previous responses so the dialogue can continue.
/// </para>
/// </summary>
public static string Assistant => "assistant";

/// <summary>
/// Tool role
/// <para>
/// Marks a message that carries the result of a tool (function) call,
/// returned to the model so it can use that output when continuing the conversation.
/// </para>
/// </summary>
public static string Tool => "tool";
}
}
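
These role constants map directly onto the role strings used by OpenAI-compatible chat APIs. Below is a minimal C# sketch of how they might be used when assembling a conversation; DemoMessage is a hypothetical stand-in for illustration only, since the real ThorChatMessage type is defined elsewhere in the repository and is not shown in this diff.

using System;
using Thor.Abstractions.Chats.Consts;

// DemoMessage is a hypothetical stand-in; the actual ThorChatMessage type
// lives elsewhere in the repository.
internal sealed record DemoMessage(string Role, string Content);

internal static class RoleConstDemo
{
    internal static void Print()
    {
        var conversation = new[]
        {
            new DemoMessage(ThorChatMessageRoleConst.System, "You are a football expert."),
            new DemoMessage(ThorChatMessageRoleConst.User, "Explain the offside rule in one sentence."),
        };

        foreach (var message in conversation)
        {
            // Each role constant resolves to the lowercase string the API expects.
            Console.WriteLine($"{message.Role}: {message.Content}");
        }
    }
}
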
24 changes: 24 additions & 0 deletions src/Thor.Abstractions/Chats/Consts/ThorMessageContentTypeConst.cs
@@ -0,0 +1,24 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace Thor.Abstractions.Chats.Consts
{
/// <summary>
/// Message content types for messages that support image recognition
/// </summary>
internal class ThorMessageContentTypeConst
{
/// <summary>
/// Text content
/// </summary>
public static string Text => "text";

/// <summary>
/// Image URL content type
/// </summary>
public static string ImageUrl => "image_url";
}
}
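
These two content-type values follow the OpenAI-style multimodal message format, where a message's content is an array of typed parts. Below is a rough sketch of the JSON shape the constants select, using anonymous objects purely for illustration because Thor's own content-part classes are not shown in this diff; since the constants class is declared internal, the sketch also assumes same-assembly usage.

using System;
using System.Text.Json;
using Thor.Abstractions.Chats.Consts;

internal static class ContentTypeDemo
{
    internal static void Print()
    {
        // Anonymous objects stand in for Thor's real content-part types.
        var contentParts = new object[]
        {
            new { type = ThorMessageContentTypeConst.Text, text = "What is in this picture?" },
            new
            {
                type = ThorMessageContentTypeConst.ImageUrl,
                image_url = new { url = "https://example.com/cat.png" }
            },
        };

        // Produces [{"type":"text","text":"..."},{"type":"image_url","image_url":{"url":"..."}}]
        Console.WriteLine(JsonSerializer.Serialize(contentParts));
    }
}
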
@@ -1,46 +1,59 @@
using System.ComponentModel.DataAnnotations;
using System.Text.Json;
using System.Text.Json.Serialization;
using OpenAI.ObjectModels.RequestModels;
using Thor.Abstractions.ObjectModels.ObjectModels;
using Thor.Abstractions.ObjectModels.ObjectModels.RequestModels;
using Thor.Abstractions.ObjectModels.ObjectModels.SharedModels;

namespace Thor.Abstractions.ObjectModels.ObjectModels.RequestModels;
namespace Thor.Abstractions.Chats.Dtos;

public class ChatCompletionCreateRequest : IOpenAiModels.ITemperature, IOpenAiModels.IModel, IOpenAiModels.IUser
/// <summary>
/// Chat completion request parameter object
/// </summary>
public class ThorChatCompletionsRequest : IOpenAiModels.ITemperature, IOpenAiModels.IModel, IOpenAiModels.IUser
{
public enum ResponseFormats
public ThorChatCompletionsRequest()
{
Text,
Json
Messages = new List<ThorChatMessage>();
}

/// <summary>
/// The messages to generate chat completions for, in the chat format.
/// The main input is the messages parameter. Messages must be an array of message objects, where each object has a
/// role (either “system”, “user”, or “assistant”) and content (the content of the message). Conversations can be as
/// short as 1 message or fill many pages.
/// The list of messages comprising the conversation so far
/// </summary>
[JsonPropertyName("messages")]
public IList<ChatMessage> Messages { get; set; }
public List<ThorChatMessage> Messages { get; set; }

/// <summary>
/// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the
/// tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are
/// considered.
/// We generally recommend altering this or temperature but not both.
/// Model identifier, such as gpt-4, gpt-3.5-turbo, or moonshot-v1-8k; the valid values depend on the underlying platform
/// </summary>
[JsonPropertyName("model")]
public string Model { get; set; }

/// <summary>
/// An alternative to sampling with temperature, called nucleus sampling, with a value between 0 and 1, where the model considers the results of the tokens with top_p probability mass.
/// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
/// We generally recommend altering this or temperature, but not both.
/// </summary>
[JsonPropertyName("top_p")]
public float? TopP { get; set; }

/// <summary>
/// How many chat completion choices to generate for each input message.
/// What sampling temperature to use, between 0 and 2.
/// Higher values like 0.8 make the output more random, while lower values like 0.2 make it more focused and deterministic.
/// We generally recommend altering this or top_p, but not both.
/// </summary>
[JsonPropertyName("temperature")]
public float? Temperature { get; set; }

/// <summary>
/// How many chat completion choices to generate for each input message. Note that you are billed for the tokens generated across all choices; keep n at 1 to minimize cost.
/// </summary>
[JsonPropertyName("n")]
public int? N { get; set; }
public int? N { get; set; } = 1;

/// <summary>
/// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events
/// as they become available, with the stream terminated by a data: [DONE] message.
/// If set, partial message deltas are sent, as in ChatGPT.
/// Tokens are sent as data-only server-sent events as they become available, and the stream is terminated by a data: [DONE] message.
/// </summary>
[JsonPropertyName("stream")]
public bool? Stream { get; set; }
@@ -150,7 +163,7 @@ public object? ToolsCalculated
{
if (jsonElement.ValueKind == JsonValueKind.Array)
{
Tools = JsonSerializer.Deserialize<List<ToolDefinition>>(jsonElement.GetRawText(),ThorJsonSerializer.DefaultOptions);
Tools = JsonSerializer.Deserialize<List<ToolDefinition>>(jsonElement.GetRawText(), ThorJsonSerializer.DefaultOptions);
}
else
{
@@ -220,49 +233,6 @@ public object? ToolChoiceCalculated
[JsonPropertyName("response_format")]
public ResponseFormat? ResponseFormat { get; set; }

/// <summary>
/// The format that the model must output. Used to enable JSON mode.
/// Must be one of "text" or "json_object".
/// </summary>
/// <example>
/// This example shows how to set the ChatResponseFormat to JSON:
/// <code>
/// var chatResponse = new ChatResponse
/// {
/// ChatResponseFormat = ChatResponseFormats.Json
/// };
/// </code>
/// </example>
/// <exception cref="ArgumentOutOfRangeException">
/// Thrown when an unsupported <see cref="ResponseFormats" /> value is provided.
/// </exception>
/// <exception cref="ValidationException">
/// Thrown when <see cref="ResponseFormat" /> is already set.
/// </exception>
[JsonIgnore]
public ResponseFormats? ChatResponseFormat
{
set
{
if (value == null) return;
if (ResponseFormat?.Type != null)
{
throw new ValidationException(
"ResponseFormat and ChatResponseFormat can not be assigned at the same time. One of them is should be null.");
}

ResponseFormat = new ResponseFormat
{
Type = value switch
{
ResponseFormats.Json => StaticValues.CompletionStatics.ResponseFormat.Json,
ResponseFormats.Text => StaticValues.CompletionStatics.ResponseFormat.Text,
_ => throw new ArgumentOutOfRangeException(nameof(value), value, null)
}
};
}
}

/// <summary>
/// This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that
/// repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed,
@@ -271,25 +241,11 @@ public ResponseFormats? ChatResponseFormat
[JsonPropertyName("seed")]
public int? Seed { get; set; }

/// <summary>
/// ID of the model to use. For models supported see <see cref="Models" /> start with <c>Gpt_</c>
/// </summary>
[JsonPropertyName("model")]
public string? Model { get; set; }

public IEnumerable<ValidationResult> Validate()
{
throw new NotImplementedException();
}

/// <summary>
/// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while
/// lower values like 0.2 will make it more focused and deterministic.
/// We generally recommend altering this or top_p but not both.
/// </summary>
[JsonPropertyName("temperature")]
public float? Temperature { get; set; }

/// <summary>
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
/// </summary>
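
Below is a minimal sketch of constructing the renamed request DTO with the parameters documented above. The request's property names follow this diff; the assumption that ThorChatMessage exposes settable Role and Content string properties (and lives in the Dtos namespace) is not confirmed here, and the serialization call is only to show the resulting wire shape.

using System;
using System.Text.Json;
using Thor.Abstractions.Chats.Consts;
using Thor.Abstractions.Chats.Dtos;

internal static class ChatRequestDemo
{
    internal static void Print()
    {
        var request = new ThorChatCompletionsRequest
        {
            Model = "gpt-4",       // provider-specific model id, e.g. moonshot-v1-8k
            Temperature = 0.2f,    // prefer setting either Temperature or TopP, not both
            Stream = false
        };

        // Assumes ThorChatMessage has settable Role/Content properties (not shown in this diff).
        request.Messages.Add(new ThorChatMessage
        {
            Role = ThorChatMessageRoleConst.System,
            Content = "You are a helpful assistant."
        });
        request.Messages.Add(new ThorChatMessage
        {
            Role = ThorChatMessageRoleConst.User,
            Content = "Hello!"
        });

        Console.WriteLine(JsonSerializer.Serialize(request));
    }
}
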
30 changes: 30 additions & 0 deletions src/Thor.Abstractions/Chats/Dtos/ThorChatCompletionsResponse.cs
@@ -0,0 +1,30 @@
using System.Text.Json.Serialization;
using Thor.Abstractions.ObjectModels.ObjectModels.ResponseModels;
using Thor.Abstractions.ObjectModels.ObjectModels.SharedModels;

namespace Thor.Abstractions.Chats.Dtos;

/// <summary>
/// Chat completion service response
/// </summary>
public record ThorChatCompletionsResponse
: BaseResponse
{
[JsonPropertyName("model")]
public string? Model { get; set; }

[JsonPropertyName("choices")]
public List<ChatChoiceResponse>? Choices { get; set; }

[JsonPropertyName("usage")]
public UsageResponse? Usage { get; set; }

[JsonPropertyName("created")]
public int CreatedAt { get; set; }

[JsonPropertyName("id")]
public string Id { get; set; }

[JsonPropertyName("system_fingerprint")]
public string SystemFingerPrint { get; set; }
}
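
A brief sketch of reading the response DTO; only the members visible in this diff are used, and the method is merely an illustrative consumer, not part of the library.

using System;
using Thor.Abstractions.Chats.Dtos;

internal static class ChatResponseDemo
{
    internal static void Print(ThorChatCompletionsResponse response)
    {
        // Only properties shown in this diff are touched here.
        Console.WriteLine($"id: {response.Id}");
        Console.WriteLine($"model: {response.Model}");
        Console.WriteLine($"created: {response.CreatedAt}");
        Console.WriteLine($"choices: {response.Choices?.Count ?? 0}");
    }
}
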