Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use system messages to shape the model output #38

Merged
merged 22 commits into from
May 11, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 13 additions & 6 deletions src/services/gpt4Service.js → src/services/OpenAiGptService.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,14 @@ import { Configuration, OpenAIApi } from "openai"
import CliState from "../cliState.js";
import ConfigService from "./configService.js"
import { encode } from "gpt-3-encoder"
import SystemMessage from "./SystemMessage.js";

export default class OpenAiGptService {

static async call(prompt, model, requestJsonOutput = true) {
if (model == "gpt3") model = "gpt-3.5-turbo";
if (model == "gpt4") model = "gpt-4";

export default class Gpt4Service {
static async call(prompt) {
const configuration = new Configuration({
apiKey: process.env.OPENAI_API_KEY
})
Expand All @@ -13,16 +18,18 @@ export default class Gpt4Service {

const config = await ConfigService.retrieveConfig();
const encoded = encode(prompt)
if (verbose) console.log(`Prompt token count: ${encoded.length}`)
const messages = requestJsonOutput ? [{role: "user", content: prompt }, ...SystemMessage.systemMessages()] : [{role: "user", content: prompt }]
if (verbose) console.log(`Prompt token count: ${encoded.length}\n\nMessages sent to the OpenAI API:\n${messages.map(m => `\n${m.role}\n--------\n${m.content}`).join("\n================\n\n")}\n\n`)
const response = await openai.createChatCompletion({
model: "gpt-4",
model: model,
temperature: config.api.temperature,
messages: [{role: "user", content: prompt }],
messages: messages,
});

if (!response?.data?.choices) return null
let result = response.data.choices.map((d) => d?.message?.content?.trim()).join()
if (verbose) console.log(`--Response--\n${result}`)
return result
}
}

}
20 changes: 20 additions & 0 deletions src/services/SystemMessage.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
/**
 * Supplies the system-role chat messages that instruct the model to reply
 * with machine-readable JSON describing file create/update/delete operations.
 * Consumed by OpenAiGptService when JSON output is requested.
 */
export default class SystemMessage {

  /**
   * Builds the system messages appended to the OpenAI chat completion request.
   * @returns {Array<{role: string, content: string}>} a single "system" message
   *          describing the required { operations: [...] } JSON response shape
   */
  static systemMessages() {
    // NOTE: the content below is model-facing prompt text — wording changes
    // alter model behavior. Fixed spelling of "delimited".
    return [{ role: "system", content: `You are a creative and helpful software engineer.
    Your response should be entirely valid json with no other content outside of the json.
    Do not include file contents or any other words before or after the json.
    Do not respond with anything but json.
    The json should be an object with an "operations" key.
    The "operations" key should be an array of objects.
    Each object should represent a file that should be created, updated, or deleted.
    Each object should have three keys: "crudOperation", "filePath", and "fileContents".
    The "crudOperation" value should contain the operation that you would like to perform for the given file. The "crudOperation" value should be "create", "update", or "delete".
    The "filePath" value should contain the path to the file.
    The "fileContents" value should be the contents of the file if the file is being created or updated - if the file is being deleted then the "fileContents" key can be omitted.
    Make sure that the "fileContents" value is delimited correctly as a json string.
    Only include changed files in your response.
    Don't abbreviate file contents - include the whole file for the "fileContents" value.` }]
  }

}
35 changes: 0 additions & 35 deletions src/services/gpt3Service.js

This file was deleted.

24 changes: 10 additions & 14 deletions src/services/pluginService.js
Original file line number Diff line number Diff line change
@@ -1,10 +1,9 @@
import { fileURLToPath } from 'url'
import { dirname } from 'path'
import { encode } from "gpt-3-encoder"
import Gpt3Service from './gpt3Service.js'
import { FileService } from './fileService.js'
import CliState from '../cliState.js'
import Gpt4Service from './gpt4Service.js'
import OpenAiGptService from './OpenAiGptService.js'
import RefactorResultProcessor from './refactorResultProcessor.js'
import TemplateLoader from './templateLoaderService.js'
import PromptContext from './promptContext.js'
Expand Down Expand Up @@ -34,7 +33,6 @@ export default class PluginService {
if (verbose) console.log(`Template path is: ${templatePath}`)

prompt = await TemplateLoader.loadTemplate(userInput.toString().trim(), context, templatePath)
if (verbose) console.log(`Prompt: \n${prompt}\n\n`)

if (CliState.isDryRun()) {
console.log(prompt)
Expand All @@ -53,8 +51,8 @@ export default class PluginService {
return 0
}

if (CliState.getTemplatePath() === "refactor" || !CliState.getTemplatePath()) {
if (verbose) console.log(`Executing: \n${output}\n\n`)
if (this.shouldRefactor(CliState.getTemplatePath())) {
if (verbose) console.log(`Executing:\n${output}`)
const operations = extractOperationsFromOutput(output)
if (CliState.isDryRun()) {
console.log(operations)
Expand Down Expand Up @@ -86,14 +84,12 @@ export default class PluginService {
await this.processPipedInput()
return "Changes applied"
}
if (model === "gpt3") {
return await Gpt3Service.call(prompt)
}
if (model === "gpt4") {
return await Gpt4Service.call(prompt)
}
console.log(`model ${model} is not supported`)
exit(1)

const shouldRefactor = this.shouldRefactor(CliState.getTemplatePath())
return await OpenAiGptService.call(prompt, model, shouldRefactor)
}

}
static shouldRefactor(templatePath) {
return templatePath === "refactor" || !templatePath
}
}
2 changes: 1 addition & 1 deletion src/services/templateUrl.js
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
class TemplateUrl {
static refactor = 'https://gist.githubusercontent.com/ferrislucas/c6a89d2a37d7e3a0882f1f3ed1cab2a7/raw/6ff336a91768572c294c714378992e35a127b787/promptr-refactor-template-v3.0.3';
static refactor = 'https://gist.githubusercontent.com/ferrislucas/f474ea2d1438c9740b29281c79c162ba/raw/529f65f7ea03f15b7f43f0afacd1ee62116eefa7/promptr-refactor-template-v4.0.0';
static empty = 'https://gist.githubusercontent.com/ferrislucas/e43ce36b49f37efe28e7414de4b71399/raw/7ef0afd5a094d392c255d7c0a98f6572dfc4bece/promptr-empty-template-v3.0.2';
static swe = 'https://gist.githubusercontent.com/ferrislucas/a6a18fdafe32910c95829a700c0887ed/raw/50e533d2db8e7e138bfa925739e5e1f5c4498e95/promptr-swe-template-v3.0.2';
static testFirst = 'https://gist.githubusercontent.com/ferrislucas/5d38034e1eefaec0a3d32bdbca3a9ac6/raw/48f1a47d179f568cf1d1fa9271d5ad13fbdc3c85/promptr-test-first-template-v3.0.2';
Expand Down
123 changes: 123 additions & 0 deletions test/OpenAiGptService.test.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
import assert from 'assert';
import sinon from 'sinon';
import OpenAiGptService from '../src/services/OpenAiGptService.js';
import { Configuration, OpenAIApi } from 'openai';
import ConfigService from '../src/services/configService.js';
import CliState from '../src/cliState.js';
import SystemMessage from '../src/services/SystemMessage.js';

// Unit tests for OpenAiGptService.call. All OpenAI network calls are replaced
// with sinon stubs, so the suite verifies: result extraction from choices,
// null handling for malformed responses, system-message inclusion driven by
// the requestJsonOutput flag, and shorthand model-name translation.
describe('OpenAiGptService', () => {
  beforeEach(() => {
    // Reset CLI state so flags (e.g. verbose) from earlier tests don't leak in.
    CliState.init([], '')
  });
  afterEach(() => {
    // Undo every stub created in the test so the next test starts clean.
    sinon.restore();
  });

  it('should return the result from the model', async () => {
    const prompt = 'What is the capital of France?';
    const expectedResult = 'The capital of France is Paris.';
    const model = 'gpt-4';

    // Stub config retrieval and the OpenAI client so no network I/O occurs.
    const configStub = sinon.stub(ConfigService, 'retrieveConfig').resolves({ api: { temperature: 0.5 } });
    const openaiStub = sinon.stub(OpenAIApi.prototype, 'createChatCompletion').resolves({
      data: {
        choices: [
          { message: { content: expectedResult } }
        ]
      }
    });

    const result = await OpenAiGptService.call(prompt, model);

    // The service should surface the stubbed choice content verbatim.
    assert.strictEqual(result, expectedResult);
    sinon.assert.calledOnce(configStub);
    sinon.assert.calledOnce(openaiStub);
  });

  it('should return null when the response does not contain choices', async () => {
    const prompt = 'What is the capital of France?';
    const model = 'gpt-4';

    const configStub = sinon.stub(ConfigService, 'retrieveConfig').resolves({ api: { temperature: 0.5 } });
    // Simulate a malformed/empty API payload (no "choices" key).
    const openaiStub = sinon.stub(OpenAIApi.prototype, 'createChatCompletion').resolves({
      data: {}
    });

    const result = await OpenAiGptService.call(prompt, model);

    // Missing choices must yield null rather than throwing.
    assert.strictEqual(result, null);
    sinon.assert.calledOnce(configStub);
    sinon.assert.calledOnce(openaiStub);
  });

  it('should append system messages in the call to openai.createChatCompletion when requestJsonOutput is true', async () => {
    const prompt = 'What is the capital of France?';
    const expectedResult = 'The capital of France is Paris.';
    const model = 'gpt-4';
    const configStub = sinon.stub(ConfigService, 'retrieveConfig').resolves({ api: { temperature: 0.5 } })
    const openaiStub = sinon.stub(OpenAIApi.prototype, 'createChatCompletion').resolves({
      data: {
        choices: [
          { message: { content: expectedResult } }
        ]
      }
    });

    // requestJsonOutput = true: the JSON-shaping system messages must be sent.
    await OpenAiGptService.call(prompt, model, true);

    sinon.assert.calledOnce(configStub);
    // Expect the user prompt followed by SystemMessage.systemMessages(),
    // in that order (deepEquals is order-sensitive).
    sinon.assert.calledWith(openaiStub, sinon.match({
      messages: sinon.match.array.deepEquals([
        { role: 'user', content: prompt },
        ...SystemMessage.systemMessages()
      ])
    }));
  });

  it('should not append system messages in the call to openai.createChatCompletion when requestJsonOutput is false', async () => {
    const prompt = 'What is the capital of France?';
    const expectedResult = 'The capital of France is Paris.';
    const model = 'gpt-4';
    const configStub = sinon.stub(ConfigService, 'retrieveConfig').resolves({ api: { temperature: 0.5 } })
    const openaiStub = sinon.stub(OpenAIApi.prototype, 'createChatCompletion').resolves({
      data: {
        choices: [
          { message: { content: expectedResult } }
        ]
      }
    });

    // requestJsonOutput = false: only the bare user prompt should be sent.
    await OpenAiGptService.call(prompt, model, false);

    sinon.assert.calledOnce(configStub);
    sinon.assert.calledWith(openaiStub, sinon.match({
      messages: sinon.match.array.deepEquals([
        { role: 'user', content: prompt }
      ])
    }));
  });

  it('should pass the correct model value to openai.createChatCompletion', async () => {
    const prompt = 'What is the capital of France?';
    const expectedResult = 'The capital of France is Paris.';
    // Shorthand CLI names and the full OpenAI model IDs they must map to.
    const models = ['gpt3', 'gpt4'];
    const expectedModels = ['gpt-3.5-turbo', 'gpt-4'];

    const configStub = sinon.stub(ConfigService, 'retrieveConfig').resolves({ api: { temperature: 0.5 } });
    const openaiStub = sinon.stub(OpenAIApi.prototype, 'createChatCompletion').resolves({
      data: {
        choices: [
          { message: { content: expectedResult } }
        ]
      }
    });

    // Each call i must forward the translated model name expectedModels[i].
    for (let i = 0; i < models.length; i++) {
      await OpenAiGptService.call(prompt, models[i]);
      sinon.assert.calledWith(openaiStub.getCall(i), sinon.match({ model: expectedModels[i] }));
    }

    sinon.assert.callCount(openaiStub, models.length);
  });
});
30 changes: 0 additions & 30 deletions test/gpt3Service.test.js

This file was deleted.

46 changes: 0 additions & 46 deletions test/gpt4Service.test.js

This file was deleted.

Loading