Commit

Merge pull request #1 from Marco-Ray/beta

Beta

Mnikley authored Jun 20, 2023
2 parents 52a5c46 + 8eb161a commit c628e84
Showing 13 changed files with 340 additions and 43 deletions.
3 changes: 3 additions & 0 deletions .idea/.gitignore

15 changes: 15 additions & 0 deletions .idea/Wox.Plugin.ChatGPT-master.iml

22 changes: 22 additions & 0 deletions .idea/inspectionProfiles/Project_Default.xml

6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml

7 changes: 7 additions & 0 deletions .idea/misc.xml

8 changes: 8 additions & 0 deletions .idea/modules.xml

6 changes: 6 additions & 0 deletions .idea/vcs.xml

2 changes: 1 addition & 1 deletion README.md
@@ -26,7 +26,7 @@ Alternatively you can run the script directly via `python app.py [QUERY]`
 ## Configuration
 - `api_key:` Your OpenAI API key
 - `model:` Used model (default: `gpt-3.5-turbo`, check https://platform.openai.com/docs/models/)
-- `max_tokens:` Maximum amount of returned tokens (longer = more expensive; default: `32`)
+- `max_tokens:` Maximum amount of returned tokens (longer = more expensive; default: `512`)
 - `temperature:` Increase randomness (default: `0.15`)
 - `stream:`: Stream response or wait for entire processed text (default: `True`)
 - `price_per_token:` Used for estimating costs (default: `0.002 / 1000` based on gpt-3.5-turbo)
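As a back-of-the-envelope check on these defaults, the per-request cost ceiling follows directly from `max_tokens` and `price_per_token`. The sketch below is illustrative only; it is not code from the plugin, and real spend also depends on the prompt tokens actually consumed:

```python
# Illustrative estimate only, using the documented defaults above.
max_tokens = 512                 # maximum amount of returned tokens
price_per_token = 0.002 / 1000   # $ per token, based on gpt-3.5-turbo

max_cost_per_request = max_tokens * price_per_token
print(f"~{max_cost_per_request:.5f}$ per request at the token limit")  # ~0.00102$
```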
48 changes: 41 additions & 7 deletions app.py
@@ -5,12 +5,15 @@
 import time
 import threading
 import sys
+import traceback
 import webbrowser
 import psutil
 from markdown import markdown
 
+from history import QueryDB
+
 config = {
-    "api_key": "insert-your-openai-api-key-here",
+    "api_key": "insert-api-key-here",
     "model": "gpt-3.5-turbo",  # check https://platform.openai.com/docs/models/ for other models
     "max_tokens": 512,  # maximum amount of returned tokens
     "temperature": 0.15,  # increases randomness
@@ -22,7 +25,8 @@
     "stop_after_one_request": False,
     "done": False,
     "completion_text": "",
-    "input_prompt": ""
+    "input_prompt": "",
+    "last_history_id": False,
 }
 openai.api_key = config["api_key"]
 app = Flask(__name__, template_folder=".")
Expand All @@ -45,7 +49,7 @@ def shutdown_flask():
for p in psutil.process_iter():
if p.name().startswith("python"): # might need to exchange for sys.executable
if len(p.cmdline()) > 2 and p.cmdline()[1] == "app.py":
time.sleep(0.5)
time.sleep(1.5)
p.kill()


@@ -81,8 +85,10 @@ def openai_call_thread():
                                                 stream=config["stream"])
     except Exception as exc:
         config["done"] = True
-        config["status"] = f"<span class='error'>Error: {exc}</span>"
+        traceback_lines = traceback.format_exc().splitlines()
+        config["status"] = f"<span class='error'>{traceback_lines[-1]}</span>"
         shutdown_flask()
+        return
 
     if config["stream"]:
         for event in response:
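The reworked error path keeps only the last line of the formatted traceback, i.e. the `ExceptionType: message` summary, for the status span. A minimal standalone illustration of that idiom (not plugin code):

```python
import traceback

# Minimal demo: the last line of a formatted traceback is the exception summary.
try:
    raise ValueError("invalid request")
except ValueError:
    last_line = traceback.format_exc().splitlines()[-1]
    print(last_line)  # ValueError: invalid request
```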
@@ -116,6 +122,11 @@ def openai_call_thread():
     if not config["stop_after_one_request"]:
         config["session_spent_text"] += f" (session: {round(config['session_spent'], 5)}$)"
 
+    # record query history
+    with QueryDB() as query_db:
+        query_db.insert_query(config)
+        config["last_history_id"] = query_db.cursor.lastrowid
+
     # convert markdown to html
     config["completion_text"] = markdown(config["completion_text"])
@@ -135,17 +146,38 @@ def openai_call(prompt: str = None):
     return jsonify(status="Started API call in thread",
                    config={key: val for key, val in config.items()
                            if key not in ["api_key", "completion_text"]},
-                   result=None)
+                   result=config.get("response"))
 
 
 @app.route('/update')
 def update():
     """Routine to fetch data, started with setInterval(getResults, interval) in index.html"""
     global config
     return jsonify(status="Update interval running",
                    config={key: val for key, val in config.items()
                            if key not in ["api_key", "completion_text"]},
-                   result=config["completion_text"])
+                   result=config.get("completion_text"))
 
 
+@app.route('/get_history')
+def get_history():
+    with QueryDB() as query_db:
+        query_history = query_db.get_all()
+    return jsonify(status="Get history queries",
+                   data=query_history)
+
+
+@app.route('/get_query/<int:query_id>')
+def get_query(query_id: int):
+    global config
+    with QueryDB() as query_db:
+        query = query_db.get_by_id(query_id-1)
+    return jsonify(status="Get query by id", data=query)
+
+
+@app.route('/close_process', methods=['GET'])
+def close_process():
+    shutdown_flask()
+    return jsonify(status='thread killed')
 
 
 if __name__ == "__main__":
@@ -156,3 +188,5 @@ def update():
 
     webbrowser.open("http://127.0.0.1:5000")
     app.run(host="127.0.0.1", port=5000, debug=False)
+
+
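The routes added above are plain GET endpoints on the local Flask server started at `127.0.0.1:5000`. A hedged usage sketch follows; `requests` is not a dependency of this repository and is used here only for illustration:

```python
import requests  # illustration only, not a plugin dependency

BASE = "http://127.0.0.1:5000"

# All stored queries, newest first (backed by QueryDB.get_all, see history.py below)
history = requests.get(f"{BASE}/get_history").json()
print(history["status"], len(history["data"]))

# One stored query; the handler passes query_id-1 and QueryDB.get_by_id adds 1 back,
# so the id is effectively used as-is.
one = requests.get(f"{BASE}/get_query/1").json()
print(one["data"])

# Ask the server to terminate itself (calls shutdown_flask)
requests.get(f"{BASE}/close_process")
```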
40 changes: 40 additions & 0 deletions history.py
@@ -0,0 +1,40 @@
+import json
+import sqlite3
+
+
+class QueryDB:
+    def __enter__(self):
+        self.conn = sqlite3.connect('database.db')
+        self.cursor = self.conn.cursor()
+        self.cursor.execute("CREATE TABLE IF NOT EXISTS queries "
+                            "(id INTEGER PRIMARY KEY,"
+                            "input TEXT,"
+                            "result TEXT,"
+                            "cost REAL,"
+                            "config JSON,"
+                            "status TEXT)")
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.conn.close()
+
+    def insert_query(self, config):
+        clean_config = {k: v for k, v in config.items() if k != "api_key"}
+        self.cursor.execute("INSERT INTO queries (input, result, cost, config, status) "
+                            "VALUES (?, ?, ?, ?, ?)",
+                            (clean_config['input_prompt'],
+                             clean_config['completion_text'],
+                             clean_config['session_spent_text'],
+                             json.dumps(clean_config),
+                             clean_config['status']))
+        self.conn.commit()
+
+    def get_all(self):
+        self.cursor.execute("SELECT * FROM queries ORDER BY id DESC")
+        results = self.cursor.fetchall()
+        return results
+
+    def get_by_id(self, query_id):
+        self.cursor.execute("SELECT * FROM queries WHERE id = ?", (query_id+1,))
+        query = self.cursor.fetchone()
+        return query
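For reference, `QueryDB` is consumed as a context manager in `app.py` above. A minimal sketch of the same pattern; the sample record is made up for illustration, the plugin passes its `config` dict:

```python
from history import QueryDB

# Made-up record for illustration; insert_query reads these keys and strips api_key.
sample = {
    "input_prompt": "What is Wox?",
    "completion_text": "Wox is an app launcher.",
    "session_spent_text": "0.00012$",
    "status": "done",
    "api_key": "secret",  # removed by insert_query before storing
}

with QueryDB() as db:            # opens/creates database.db in the working directory
    db.insert_query(sample)
    last_id = db.cursor.lastrowid            # same pattern as openai_call_thread
    print(db.get_by_id(last_id - 1))         # get_by_id adds 1 to the id it receives
    print(len(db.get_all()), "rows stored")  # newest first
```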
