diff --git a/.github/workflows/code-review.yaml b/.github/workflows/code-review.yaml
new file mode 100644
index 0000000..ed92b5c
--- /dev/null
+++ b/.github/workflows/code-review.yaml
@@ -0,0 +1,29 @@
+name: GPT Code Review 🤖
+
+permissions:
+  contents: read
+  pull-requests: write
+
+on:
+  pull_request:
+    types:
+      - opened
+      - reopened
+      - synchronize
+
+jobs:
+  review:
+    name: GPT Code Review 🤖
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Repo 🛎️
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+
+      - name: GPT Code Review 🤖
+        uses: mattzcarey/code-review-gpt@v0.1.4-alpha
+        with:
+          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          MODEL: 'gpt-3.5-turbo'
+          GITHUB_TOKEN: ${{ github.token }}
diff --git a/randomfile.ts b/randomfile.ts
new file mode 100644
index 0000000..d436b29
--- /dev/null
+++ b/randomfile.ts
@@ -0,0 +1,108 @@
+/* eslint-disable max-depth */
+/* eslint-disable complexity */
+// Reviewing multiple files inline > prioritising them > adding review comments
+// Answer questions > get the comments on the PR (by me and the questioner) as context > answer the question as comment
+
+import jsesc from "jsesc";
+
+import { modelInfo } from "../constants";
+import { AIModel } from "../llm/ai";
+import { buildReviewPrompt } from "../prompts/buildPrompt";
+import { ReviewFile } from "../types";
+
+/**
+ * Thin wrapper around an OpenAI chat model that turns a unified-diff
+ * patch into structured review data (ReviewFile[]).
+ */
+export class Chat {
+  ai: AIModel;
+  modelName: string;
+  constructor(
+    openaiApiKey: string,
+    openaiModelName?: string,
+    temperature?: string
+  ) {
+    this.modelName = openaiModelName ?? "gpt-4-1106-preview";
+    this.ai = new AIModel({
+      modelName: this.modelName,
+      apiKey: openaiApiKey,
+      temperature: temperature ? parseFloat(temperature) : 0,
+    });
+  }
+
+  // Look up the prompt-size budget for the configured model; throws if the model is unknown.
+  private getMaxPromptLength = (modelName: string): number => {
+    const model = modelInfo.find((info) => info.model === modelName);
+    if (!model) {
+      throw new Error(`Model ${modelName} not found`);
+    }
+
+    return model.maxPromptLength;
+  };
+
+  /**
+   * Ask the model to review a patch. Returns undefined when the prompt is
+   * too large for the model, or when the response cannot be parsed as JSON.
+   */
+  public getReview = async (
+    patch: string
+  ): Promise<ReviewFile[] | undefined> => {
+    const prompt = buildReviewPrompt(patch);
+    const maxPromptLength = this.getMaxPromptLength(this.modelName);
+
+    if (prompt.length > maxPromptLength) {
+      console.error(
+        `Prompt (${prompt.length} chars) exceeds model limit of ${maxPromptLength}, skipping review for this file`
+      );
+
+      return undefined;
+    }
+
+    try {
+      let jsonResponse = await this.ai.callModel(prompt);
+      jsonResponse = removeMarkdownJsonQuotes(jsonResponse);
+
+      try {
+        return JSON.parse(jsonResponse) as ReviewFile[];
+      } catch (parseError) {
+        console.error(
+          `Error parsing JSON: ${
+            (parseError as Error).message
+          }. Escaping special characters and retrying.`
+        );
+
+        try {
+          // eslint-disable-next-line @typescript-eslint/no-unsafe-assignment, @typescript-eslint/no-unsafe-call
+          const escapedJsonResponse: string = jsesc(jsonResponse, {
+            json: true,
+          });
+
+          return JSON.parse(escapedJsonResponse) as ReviewFile[];
+        } catch (escapeParseError) {
+          console.error(
+            `Error parsing escaped JSON: ${
+              (escapeParseError as Error).message
+            }. Returning undefined.`
+          );
+
+          return undefined;
+        }
+      }
+    } catch (error) {
+      console.error(
+        `Error processing review data: ${(error as Error).message}`
+      );
+
+      return undefined;
+    }
+  };
+}
+
+// Strips the Markdown code-fence backticks (```json ... ```) that models
+// often wrap around a JSON response before it can be parsed.
+const removeMarkdownJsonQuotes = (jsonString: string): string => {
+  return jsonString
+    .replace(/^`+\s*json\s*/, "")
+    .replace(/\s*`+$/, "")
+    .trim();
+};
\ No newline at end of file