add ci lint
jiacai2050 committed Jun 1, 2024
1 parent f3ac66b commit 3b00050
Showing 15 changed files with 176 additions and 98 deletions.
27 changes: 27 additions & 0 deletions .github/workflows/ci.yml
@@ -0,0 +1,27 @@
name: CI

on:
  workflow_dispatch:
  pull_request:
    paths:
      - '**.py'
      - '**.yml'
      - '**.toml'
  push:
    branches:
      - main
    paths:
      - '**.py'
      - '**.yml'
      - '**.toml'

jobs:
  pypi-publish:
    name: Upload release to PyPI
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: chartboost/ruff-action@v1
        with:
          args: 'format --check'
      - uses: chartboost/ruff-action@v1
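Both `chartboost/ruff-action@v1` steps run Ruff over the checkout: the first verifies formatting with `ruff format --check`, the second (no `args`) runs the default lint pass (note the job id and name still read `pypi-publish` / `Upload release to PyPI`, evidently carried over from a publish workflow, though the steps only run Ruff). A minimal local sketch of the same gate, assuming `ruff` is installed and on PATH:

```python
# local_lint.py -- hypothetical helper mirroring the two CI steps
import subprocess
import sys


def ruff(*args: str) -> int:
    # run ruff with the given subcommand and return its exit code
    return subprocess.run(["ruff", *args]).returncode


if __name__ == "__main__":
    # format check first, then lint; a non-zero exit fails the gate
    sys.exit(ruff("format", "--check") or ruff("check"))
```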
10 changes: 9 additions & 1 deletion Makefile
@@ -10,7 +10,15 @@ build: clean
clean:
	rm -rf build dist shgpt.egg-info


lint:
	ruff check
	ruff format

shell:
	hatch shell

roles:
	@ python download-roles.py

.PHONY: tui repl build clean roles
.PHONY: tui repl build clean lint shell roles
3 changes: 2 additions & 1 deletion README.md
@@ -2,7 +2,7 @@

[![](https://img.shields.io/pypi/v/shgpt)](https://pypi.org/project/shgpt/)

Chat with LLM for anything you like, be it shell generator, story teller, linux-terminal, etc. All without leaving your terminal!
Chat with LLM in your terminal, be it shell generator, story teller, linux-terminal, etc.

# Install
```
@@ -53,6 +53,7 @@ $ shgpt -r javascript-console 0.1 + 0.2
0.3

```

# Requirements
- [Ollama](https://ollama.com/), you need to download models before trying shellgpt.

20 changes: 12 additions & 8 deletions download-roles.py
@@ -1,23 +1,27 @@
import json, csv
import json
import csv
import subprocess

# https://github.com/f/awesome-chatgpt-prompts/
url = 'https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv'
url = "https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv"


def main():
    out = subprocess.getoutput(f'curl -s {url}')
    rdr = csv.reader(out.split('\n'))
    out = subprocess.getoutput(f"curl -s {url}")
    rdr = csv.reader(out.split("\n"))
    roles = {}
    for row in rdr:
        name = row[0].replace(' ', '-').replace('`', '').replace('/', '-').lower()
        if name == 'act':
        name = row[0].replace(" ", "-").replace("`", "").replace("/", "-").lower()
        if name == "act":
            # skip first row
            continue

        content = row[1]
        roles[name] = content

    with open('roles.json', 'w') as f:
    with open("roles.json", "w") as f:
        f.write(json.dumps(roles, indent=4))

if __name__ == '__main__':

if __name__ == "__main__":
    main()
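The script skips the CSV header row (whose first cell is `act`), slugifies each prompt name, and writes `roles.json` as a flat name-to-prompt mapping. A small sketch of reading it back; the role key is illustrative (one of the awesome-chatgpt-prompts entries):

```python
# inspect_roles.py -- sketch of consuming the generated roles.json
import json

with open("roles.json") as f:
    roles = json.load(f)  # {"linux-terminal": "I want you to act as ...", ...}

# look up a single role's system prompt
print(roles.get("linux-terminal", "<role not found>"))
```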
38 changes: 20 additions & 18 deletions pyproject.toml
@@ -6,7 +6,7 @@ build-backend = "hatchling.build"
name = "shgpt"
dynamic = ["version"]
authors = [{name = "Jiacai Liu", email="dev@liujiacai.net"}]
description = "Chat with LLM for anything you like, be it shell generator, story teller, linux-terminal, etc. All without leaving your terminal!"
description = "Chat with LLM in your terminal, be it shell generator, story teller, linux-terminal, etc."
readme = "README.md"
keywords = ["llm", "shell", "gpt"]
license = "GPL-3.0"
@@ -15,6 +15,22 @@ dependencies = [
"pyperclip",
"textual",
]
classifiers = [
"Operating System :: OS Independent",
"Topic :: Software Development",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
]

[project.urls]
Repository = "https://github.com/jiacai2050/shellgpt"
@@ -41,20 +57,6 @@ include = [
"shgpt",
]


classifiers = [
    "Operating System :: OS Independent",
    "Topic :: Software Development",
    "License :: OSI Approved :: MIT License",
    "Intended Audience :: Information Technology",
    "Intended Audience :: System Administrators",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3 :: Only",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
]
# https://github.com/astral-sh/ruff?tab=readme-ov-file#configuration
[tool.ruff.lint]
ignore = ["F405", "F403"]
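F403 and F405 are Ruff's pyflakes-derived star-import rules, ignored here because the code imports shared config with `from ... import *` (see `shgpt/api/ollama.py` and `shgpt/app.py` below). A sketch of what would otherwise be flagged:

```python
# star_import_demo.py -- why F403/F405 are silenced
from shgpt.utils.conf import *  # F403: `from module import *` used

# F405: OLLAMA_URL may be undefined, or defined from star imports
print(OLLAMA_URL)
```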
2 changes: 2 additions & 0 deletions shgpt/__init__.py
@@ -1 +1,3 @@
from .app import main

__all__ = ["main"]
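With `__all__ = ["main"]`, `main` becomes the package's single advertised name: star-imports expose only it, and Ruff's F401 (unused import) no longer fires on the re-export. For example:

```python
# only names listed in shgpt.__all__ are star-imported
from shgpt import *

print(callable(main))  # True: main is the public entry point
```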
2 changes: 1 addition & 1 deletion shgpt/__main__.py
@@ -1,5 +1,5 @@
from .app import main


if __name__ == '__main__':
if __name__ == "__main__":
    main()
5 changes: 2 additions & 3 deletions shgpt/api/history.py
@@ -1,9 +1,7 @@


class FileHistory(object):
    def __init__(self, path):
        self.path = path
        self.file = open(path, 'a+')
        self.file = open(path, "a+")

    def write(self, content):
        self.file.write(content)
@@ -12,6 +10,7 @@ def write(self, content):
    def close(self):
        self.file.close()


class DummyHistory(object):
    def write(self, content):
        pass
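`DummyHistory` keeps `FileHistory`'s interface while doing nothing, so the caller picks a sink once and never branches on the history setting again (a null-object pattern), which is how the Ollama client below uses it. A usage sketch, with the flag and path hypothetical:

```python
# choose a history sink once; callers treat both types identically
import os

ENABLE_HISTORY = True  # stand-in for the real config flag
history = (
    FileHistory(os.path.join("/tmp", "history"))  # hypothetical path
    if ENABLE_HISTORY
    else DummyHistory()
)
history.write("prompt and answer ...\n")
```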
36 changes: 20 additions & 16 deletions shgpt/api/ollama.py
@@ -1,51 +1,55 @@
import json, os
import json
import os
from ..utils.http import TimeoutSession
from ..utils.common import *
from ..utils.conf import *
from .history import DummyHistory, FileHistory

HIST_SEP = '=========='
HIST_SEP = "=========="


# https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion
class Ollama(object):
    def __init__(self, base_url, model, role, timeout):
        self.base_url = base_url
        self.http_session = TimeoutSession(timeout=timeout)
        if ENABLE_HISTORY:
            self.history_file = FileHistory(os.path.join(CONF_PATH, 'history'))
            self.history_file = FileHistory(os.path.join(CONF_PATH, "history"))
        else:
            self.history_file = DummyHistory()
        self.model = model
        self.role = role

    def generate(self, prompt, stream=True):
        url = self.base_url + '/api/chat'
        debug_print(f"generate: {prompt} to {url} with model {self.model} role {self.role} and stream {stream}")
        url = self.base_url + "/api/chat"
        debug_print(
            f"generate: {prompt} to {url} with model {self.model} role {self.role} and stream {stream}"
        )
        system_content = ROLE_CONTENT.get(self.role, self.role)
        payload = {
            'messages': [
                {'role': 'system', 'content': system_content, 'name': 'ShellGPT'},
                {'role': 'user', 'content': prompt, 'name': 'user'},
            "messages": [
                {"role": "system", "content": system_content, "name": "ShellGPT"},
                {"role": "user", "content": prompt, "name": "user"},
            ],
            'model': self.model,
            'stream': stream
            "model": self.model,
            "stream": stream,
        }
        debug_print(f'Infer message: {payload}')
        debug_print(f"Infer message: {payload}")
        r = self.http_session.post(url, json=payload, stream=stream)
        if r.status_code != 200:
            raise Exception("Error: " + r.text)

        answer = ''
        answer = ""
        for item in r.iter_content(chunk_size=None):
            resp = json.loads(item)
            if resp['done'] is False:
                content = resp['message']['content']
            if resp["done"] is False:
                content = resp["message"]["content"]
                answer += content
                yield content
            else:
                self.history_file.write(fr'''{now_ms()},{resp['eval_duration']},{resp['eval_count']}
                self.history_file.write(rf"""{now_ms()},{resp['eval_duration']},{resp['eval_count']}
{prompt}
{HIST_SEP}
{answer}
{HIST_SEP}
''')
""")
76 changes: 50 additions & 26 deletions shgpt/app.py
@@ -1,19 +1,21 @@
import argparse, sys, readline
import argparse
import sys
from os import makedirs
from .api.ollama import Ollama
from .version import __version__
from .utils.conf import *
from .utils.common import *
from .tui.app import ShellGPTApp
from functools import partial


def init_app():
    print(f'Create {CONF_PATH}...')
    print(f"Create {CONF_PATH}...")
    makedirs(CONF_PATH, exist_ok=True)


def read_action(cmd):
    if IS_TTY:
        action = input("(E)xecute, (Y)ank or (C)ontinue(default): ")
        action = input("(E)xecute, (Y)ank or Continue(default): ")
        action = action.upper()
        if action == "E":
            print(execute_cmd(cmd))
@@ -31,33 +33,32 @@ def tui(self, initial_prompt):
        app.run()

    def repl(self, initial_prompt):
        print(r'''
        print(r"""
 __ __ _ _ ___ _ _ _ ___ ___ _____
 \ \ / /__| |__ ___ _ __ ___ | |_ ___ / __| |_ ___| | |/ __| _ \_ _|
 \ \/\/ / -_) / _/ _ \ ' \/ -_) | _/ _ \ \__ \ ' \/ -_) | | (_ | _/ | |
 \_/\_/\___|_\__\___/_|_|_\___| \__\___/ |___/_||_\___|_|_|\___|_| |_|
''')
""")
        self.infer(initial_prompt)
        while True:
            prompt = input("> ")
            if 'exit' == prompt:
            if "exit" == prompt:
                sys.exit(0)

            self.infer(prompt)


    def infer(self, prompt):
        if prompt == "":
            return

        buf = ''
        buf = ""
        try:
            for r in self.llm.generate(prompt):
                buf += r
                print(r, end='')
                print(r, end="")

            print()
            if self.role == 'shell':
            if self.role == "shell":
                read_action(buf)
        except Exception as e:
            print(f"Error when infer: ${e}")
@@ -67,18 +68,39 @@ def main():
    prog = sys.argv[0]
    parser = argparse.ArgumentParser(
        prog=prog,
        description='Make Shell easy to use with power of LLM!')
    parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument('-s', '--shell', action='store_true', help='Infer shell command')
    parser.add_argument('-r', '--role', default='default', help='System role message')
    parser.add_argument('-l', '--repl', action='store_true', help='Start interactive REPL')
    parser.add_argument('-t', '--tui', action='store_true', help='Start TUI')
    parser.add_argument('--init', action='store_true', help='Init config')
    parser.add_argument('--timeout', type=int, help='Timeout for each inference request', default=INFER_TIMEOUT)
    parser.add_argument('--ollama-url', default=OLLAMA_URL, help='Ollama URL')
    parser.add_argument('-m', '--ollama-model', default='llama3', help='Ollama model')
    parser.add_argument('-v', '--verbose', action='store_true', help='verbose mode')
    parser.add_argument('prompt', metavar='<prompt>', nargs='*')
        description="Chat with LLM in your terminal, be it shell generator, story teller, linux-terminal, etc.",
    )
    parser.add_argument(
        "-V", "--version", action="version", version="%(prog)s " + __version__
    )
    parser.add_argument(
        "-s",
        "--shell",
        action="store_true",
        help="System role set to `shell`",
    )
    parser.add_argument(
        "-c",
        "--code",
        action="store_true",
        help="System role set to `code`",
    )
    parser.add_argument("-r", "--role", default="default", help="System role to use")
    parser.add_argument(
        "-l", "--repl", action="store_true", help="Start interactive REPL"
    )
    parser.add_argument("-t", "--tui", action="store_true", help="Start TUI mode")
    parser.add_argument("--init", action="store_true", help="Init config")
    parser.add_argument(
        "--timeout",
        type=int,
        help="Timeout for each inference request",
        default=INFER_TIMEOUT,
    )
    parser.add_argument("--ollama-url", default=OLLAMA_URL, help="Ollama URL")
    parser.add_argument("-m", "--ollama-model", default="llama3", help="Ollama model")
    parser.add_argument("-v", "--verbose", action="store_true", help="verbose mode")
    parser.add_argument("prompt", metavar="<prompt>", nargs="*")
    args = parser.parse_args()
    set_verbose(args.verbose)

@@ -87,9 +109,9 @@ def main():
        sys.exit(0)

    sin = read_stdin()
    prompt = ' '.join(args.prompt)
    prompt = " ".join(args.prompt)
    if sin is not None:
        prompt = f'{sin}\n\n{prompt}'
        prompt = f"{sin}\n\n{prompt}"

    if args.repl:
        app_mode = AppMode.REPL
@@ -101,7 +123,9 @@ def main():
    role = args.role
    # tui default to shell role
    if args.shell or app_mode == AppMode.TUI:
        role = 'shell'
        role = "shell"
    elif args.code:
        role = "code"

    if role not in ROLE_CONTENT:
        try:
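The role resolution gives `-s/--shell` (and TUI mode) precedence over the new `-c/--code` flag, with `-r/--role` as the fallback before validation against `ROLE_CONTENT`. Condensed into a standalone sketch (the helper name is mine, not the code's):

```python
# resolve_role.py -- hypothetical condensation of the precedence logic above
def resolve_role(shell: bool, code: bool, role: str, is_tui: bool) -> str:
    if shell or is_tui:
        return "shell"  # -s and TUI mode default to the shell role
    if code:
        return "code"  # -c selects the code role
    return role  # otherwise honor -r/--role (default "default")


assert resolve_role(True, True, "default", False) == "shell"  # -s wins over -c
```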