-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
/
route.ts
141 lines (132 loc) · 4.5 KB
/
route.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
import { Ratelimit } from "@upstash/ratelimit";
import { kv } from "@vercel/kv";
import { OpenAIStream, StreamingTextResponse } from "ai";
import OpenAI from "openai";
import type { ChatCompletionMessageParam } from "openai/resources/index.mjs";
import { match } from "ts-pattern";
// Run this route on Vercel's Edge runtime (required for low-latency streaming):
// https://vercel.com/docs/functions/edge-functions/edge-runtime
export const runtime = "edge";
/**
 * Edge route powering the editor's AI text actions.
 *
 * Expects a JSON body of `{ prompt, option, command }`, where `option` selects
 * the behavior ("continue" | "improve" | "shorter" | "longer" | "fix" | "zap")
 * and `command` is only used by the "zap" option.
 *
 * @param req - incoming request; `x-forwarded-for` is used for rate limiting.
 * @returns a streaming text response with the model output;
 *   400 when OPENAI_API_KEY is unset, 429 when the daily rate limit is hit.
 */
export async function POST(req: Request): Promise<Response> {
  // Validate the key BEFORE constructing the client: the OpenAI SDK throws at
  // construction time when `apiKey` is undefined, which would surface as an
  // opaque 500 instead of this friendly 400.
  if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === "") {
    return new Response("Missing OPENAI_API_KEY - make sure to add it to your .env file.", {
      status: 400,
    });
  }

  // Create an OpenAI API client (edge friendly). The base URL is overridable
  // so the route works against OpenAI-compatible proxies.
  const openai = new OpenAI({
    apiKey: process.env.OPENAI_API_KEY,
    baseURL: process.env.OPENAI_BASE_URL || "https://api.openai.com/v1",
  });

  // Rate limiting is opt-in: enforced only when Vercel KV is configured.
  if (process.env.KV_REST_API_URL && process.env.KV_REST_API_TOKEN) {
    // `x-forwarded-for` can be absent; fall back to a shared bucket rather
    // than silently keying every unknown client under the string "null".
    const ip = req.headers.get("x-forwarded-for") ?? "unknown";
    const ratelimit = new Ratelimit({
      redis: kv,
      limiter: Ratelimit.slidingWindow(50, "1 d"),
    });
    const { success, limit, reset, remaining } = await ratelimit.limit(`novel_ratelimit_${ip}`);

    if (!success) {
      return new Response("You have reached your request limit for the day.", {
        status: 429,
        headers: {
          "X-RateLimit-Limit": limit.toString(),
          "X-RateLimit-Remaining": remaining.toString(),
          "X-RateLimit-Reset": reset.toString(),
        },
      });
    }
  }

  const { prompt, option, command } = await req.json();

  // Map the requested option to a chat transcript. NOTE: `.run()` throws on an
  // unrecognized option; clients must send one of the handled values.
  const messages = match(option)
    .with("continue", () => [
      {
        role: "system",
        content:
          "You are an AI writing assistant that continues existing text based on context from prior text. " +
          "Give more weight/priority to the later characters than the beginning ones. " +
          "Limit your response to no more than 200 characters, but make sure to construct complete sentences. " +
          "Use Markdown formatting when appropriate.",
      },
      {
        role: "user",
        content: prompt,
      },
    ])
    .with("improve", () => [
      {
        role: "system",
        content:
          "You are an AI writing assistant that improves existing text. " +
          "Limit your response to no more than 200 characters, but make sure to construct complete sentences. " +
          "Use Markdown formatting when appropriate.",
      },
      {
        role: "user",
        content: `The existing text is: ${prompt}`,
      },
    ])
    .with("shorter", () => [
      {
        role: "system",
        content:
          "You are an AI writing assistant that shortens existing text. " +
          "Use Markdown formatting when appropriate.",
      },
      {
        role: "user",
        content: `The existing text is: ${prompt}`,
      },
    ])
    .with("longer", () => [
      {
        role: "system",
        content:
          "You are an AI writing assistant that lengthens existing text. " +
          "Use Markdown formatting when appropriate.",
      },
      {
        role: "user",
        content: `The existing text is: ${prompt}`,
      },
    ])
    .with("fix", () => [
      {
        role: "system",
        content:
          "You are an AI writing assistant that fixes grammar and spelling errors in existing text. " +
          "Limit your response to no more than 200 characters, but make sure to construct complete sentences. " +
          "Use Markdown formatting when appropriate.",
      },
      {
        role: "user",
        content: `The existing text is: ${prompt}`,
      },
    ])
    .with("zap", () => [
      {
        role: "system",
        content:
          "You are an AI writing assistant that generates text based on a prompt. " +
          "You take an input from the user and a command for manipulating the text. " +
          "Use Markdown formatting when appropriate.",
      },
      {
        role: "user",
        content: `For this text: ${prompt}. You have to respect the command: ${command}`,
      },
    ])
    .run() as ChatCompletionMessageParam[];

  const response = await openai.chat.completions.create({
    model: "gpt-3.5-turbo",
    stream: true,
    messages,
    temperature: 0.7,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0,
    n: 1,
  });

  // Convert the response into a friendly text-stream and hand it back.
  const stream = OpenAIStream(response);
  return new StreamingTextResponse(stream);
}