From 9c28f0d1c39506108a36a587aa18a227218067e9 Mon Sep 17 00:00:00 2001
From: jiacai2050
Date: Sun, 16 Jun 2024 08:43:14 +0800
Subject: [PATCH] tidy

---
 README.md           | 20 +++++++++++++-------
 pyproject.toml      |  4 ++--
 shgpt/api/ollama.py | 15 +++++++++------
 shgpt/app.py        | 13 ++++++-------
 shgpt/tui/app.py    |  5 ++---
 shgpt/utils/conf.py | 10 ++++------
 6 files changed, 36 insertions(+), 31 deletions(-)

diff --git a/README.md b/README.md
index d26f069..44d99ca 100644
--- a/README.md
+++ b/README.md
@@ -7,11 +7,16 @@
 Chat with LLM in your terminal, be it shell generator, story teller, linux-terminal, etc.
 
 # Install
-```
+```bash
 pip install -U shgpt
 ```
 
-This will install two commands: `sg` and `shgpt`, which are identical.
+Or install latest version
+```bash
+pip install --force-reinstall -U git+https://github.com/jiacai2050/shellgpt.git
+```
+
+This will install two commands: `sg` and `shellgpt`, which are identical.
 
 After install, use `sg --init` to create required directories(mainly `~/.shellgpt`).
 
@@ -49,12 +54,13 @@ There are 3 key bindings to use in TUI:
 
 ## Role
 
-There are 4 built-in [system role contents](https://platform.openai.com/docs/guides/text-generation/chat-completions-api) in shellgpt:
+There are some built-in [system role contents](https://platform.openai.com/docs/guides/text-generation/chat-completions-api) in shellgpt:
 - `default`, used for ask general questions
+- `typo`, used for correct article typos.
+- `slug`, used for generate URL slug.
 - `code`, used for ask programming questions
 - `shell`, used for infer shell command
-- `typo`, used for correct article typos.
-- `cm`, used for generate git commit message, like `git diff | sg -r cm`
+- `commit`, used for generate git commit message, like `git diff | sg -r commit`
 
 Users can define their own content in `~/.shellgpt/roles.json`, it a JSON map with
 - key being role name and
@@ -63,10 +69,10 @@ Users can define their own content in `~/.shellgpt/roles.json`, it a JSON map wi
 
 Or you can just copy [roles.json](https://github.com/jiacai2050/shellgpt/blob/main/roles.json) to play with, it's generated from [Awesome ChatGPT Prompts](https://github.com/f/awesome-chatgpt-prompts/blob/main/prompts.csv).
 
 ```bash
-$ shgpt -r linux-terminal pwd
+$ sg -r linux-terminal pwd
 /home/user
 
-$ shgpt -r javascript-console 0.1 + 0.2
+$ sg -r javascript-console 0.1 + 0.2
 0.3
 ```
diff --git a/pyproject.toml b/pyproject.toml
index 6e38fbe..9e80d3e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ dynamic = ["version"]
 authors = [{name = "Jiacai Liu", email="dev@liujiacai.net"}]
 description = "Chat with LLM in your terminal, be it shell generator, story teller, linux-terminal, etc."
 readme = "README.md"
-keywords = ["llm", "shell", "gpt"]
+keywords = ["ai", "llm", "shell", "gpt"]
 license = "GPL-3.0"
 requires-python = ">=3.0.0"
 dependencies = [
@@ -40,7 +40,7 @@ Issues = "https://github.com/jiacai2050/shellgpt/issues"
 
 [project.scripts]
 sg = "shgpt:main"
-shgpt = "shgpt:main"
+shellgpt = "shgpt:main"
 
 [tool.hatch.version]
 path = "shgpt/version.py"
diff --git a/shgpt/api/ollama.py b/shgpt/api/ollama.py
index ae7f21f..8eb14cd 100644
--- a/shgpt/api/ollama.py
+++ b/shgpt/api/ollama.py
@@ -7,7 +7,7 @@
 
 # https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion
 class Ollama(object):
-    def __init__(self, base_url, key, role, temperature, timeout, max_messages):
+    def __init__(self, base_url, key, model, role, temperature, timeout, max_messages):
         session = TimeoutSession(timeout=timeout)
         if key is not None and key != '':
             session.headers = {'Authorization': f'Bearer {key}'}
@@ -15,6 +15,7 @@ def __init__(self, base_url, key, role, temperature, timeout, max_messages):
         else:
             self.use_openai = False
         self.base_url = base_url
+        self.model = model
         self.http_session = session
         self.role = role
         self.temperature = temperature
@@ -26,14 +27,15 @@ def __init__(self, base_url, key, role, temperature, timeout, max_messages):
         )
         self.messages = []
 
-    def chat(self, model, prompt, stream=True, add_system_message=True):
+    def chat(self, prompt, stream=True, add_system_message=True):
         return (
-            self.chat_openai(model, prompt, stream, add_system_message)
+            self.chat_openai(prompt, stream, add_system_message)
             if self.use_openai
-            else self.chat_ollama(model, prompt, stream, add_system_message)
+            else self.chat_ollama(prompt, stream, add_system_message)
         )
 
-    def chat_openai(self, model, prompt, stream, add_system_message):
+    def chat_openai(self, prompt, stream, add_system_message):
+        model = self.model
         url = f'{self.base_url}/v1/chat/completions'
         debug_print(
             f'chat: {prompt} to {url} with model {model} role {self.role} and stream {stream}'
@@ -107,7 +109,8 @@ def make_messages(self, model, prompt, support_image, add_system_message):
 
         return msgs, model
 
-    def chat_ollama(self, model, prompt, stream, add_system_message):
+    def chat_ollama(self, prompt, stream, add_system_message):
+        model = self.model
         url = self.base_url + '/api/chat'
         debug_print(
             f'chat: {prompt} to {url} with model {model} role {self.role} and stream {stream}'
diff --git a/shgpt/app.py b/shgpt/app.py
index b418cfc..23389b8 100644
--- a/shgpt/app.py
+++ b/shgpt/app.py
@@ -36,11 +36,10 @@ def init_app():
 class ShellGPT(object):
     def __init__(self, url, key, model, role, temperature, timeout, max_messages):
         self.is_shell = role == 'shell'
-        self.model = model
-        self.llm = Ollama(url, key, role, temperature, timeout, max_messages)
+        self.llm = Ollama(url, key, model, role, temperature, timeout, max_messages)
 
     def tui(self, history, initial_prompt):
-        app = ShellGPTApp(self.model, self.llm, history, initial_prompt)
+        app = ShellGPTApp(self.llm, history, initial_prompt)
         app.run()
 
     # return true when prompt is a set command
@@ -56,7 +55,7 @@ def repl_action(self, prompt):
         sub_cmd = args[1]
 
         if sub_cmd == 'model':
-            self.model = args[2]
+            self.llm.model = args[2]
             return True
 
         return False
@@ -70,7 +69,7 @@ def repl(self, initial_prompt):
 """)
         self.infer(initial_prompt)
         while True:
-            prompt = input(f'{self.model}> ')
+            prompt = input(f'{self.llm.model}> ')
             if self.repl_action(prompt):
                 continue
 
@@ -82,7 +81,7 @@ def infer(self, prompt):
         buf = ''
 
         try:
-            for r in self.llm.chat(self.model, prompt):
+            for r in self.llm.chat(prompt):
                 buf += r
                 if self.is_shell is False:
                     print(r, end='', flush=True)
@@ -106,7 +105,7 @@ def shell_action(self, cmd):
         action = action.upper()
         if action == 'E':
             for r in self.llm.chat(
-                self.model, f'Explain this command: {cmd}', add_system_message=False
+                f'Explain this command: {cmd}', add_system_message=False
             ):
                 print(r, end='', flush=True)
             print()
diff --git a/shgpt/tui/app.py b/shgpt/tui/app.py
index 3f95810..fae87c7 100644
--- a/shgpt/tui/app.py
+++ b/shgpt/tui/app.py
@@ -50,8 +50,7 @@ class ShellGPTApp(App):
         Binding('ctrl+r', 'run', 'Run code block'),
     ]
 
-    def __init__(self, model, llm, history, initial_prompt):
-        self.model = model
+    def __init__(self, llm, history, initial_prompt):
         self.llm = llm
         self.history = history
         self.has_inflight_req = False
@@ -59,7 +58,7 @@ def __init__(self, model, llm, history, initial_prompt):
         super().__init__()
 
     def on_mount(self) -> None:
-        self.title = f'ShellGPT({self.model})'
+        self.title = f'ShellGPT({self.llm.model})'
 
     def compose(self) -> ComposeResult:
         """Create child widgets for the app."""
diff --git a/shgpt/utils/conf.py b/shgpt/utils/conf.py
index f3c8745..33f08b8 100644
--- a/shgpt/utils/conf.py
+++ b/shgpt/utils/conf.py
@@ -46,13 +46,11 @@ def get_shell_type():
 Output the best matching shell commands without any other information, or any quotes.
 Make sure it's valid shell command.
 """.format(os_name=OS_NAME, shell=SHELL),
-    # commit message
-    'cm': """
-    Generate git commit message for this changes.
-    """,
-    'typo': '''
+    'commit': "You are now a git commit message writer. I'll give you a list of changes, and you'll reply with a commit message that summarizes these changes in a clear and concise way, keeping the original formatting.",
+    'typo': """
 You are now an article correction assistant. You need to find out the input text in the spelling errors, incoherent places, can only return the corrected text, without any explanation. The output keeps the original format and language output, don't modify the style, and keey the code blocks unchanged.
-    '''
+    """,
+    'slug': 'You are now a slug generator. I will give you some sentences, and you will reply with a slug version of those sentences. A slug is a URL-friendly version of a title, where spaces are replaced with hyphens, and all characters are lowercased. Do not include any special characters, and keep the output in English.',
 }
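Below is a minimal usage sketch (not part of the patch itself) of the refactored `Ollama` client after this change: the model is now bound to the client at construction time, `chat()` drops its `model` parameter and reads `self.model` instead, and switching models just reassigns that attribute. The endpoint, model name, and tuning values are illustrative assumptions.

```python
# Sketch only: demonstrates the call pattern introduced by this patch.
# The base_url, model name, and numeric values below are assumptions.
from shgpt.api.ollama import Ollama

llm = Ollama(
    base_url='http://localhost:11434',  # assumed local Ollama endpoint
    key=None,                           # no API key -> non-OpenAI (Ollama) path
    model='llama3',                     # model is now stored on the client
    role='shell',
    temperature=0.8,
    timeout=60,
    max_messages=20,
)

# Switching models now just reassigns the attribute, as the REPL's
# `model` sub-command does in shgpt/app.py.
llm.model = 'codellama'

# chat() no longer takes a model argument; it streams response chunks.
for chunk in llm.chat('find files larger than 100MB'):
    print(chunk, end='', flush=True)
```

Binding the model to the client keeps the call sites in `shgpt/app.py` and `shgpt/tui/app.py` free of model bookkeeping, which is the main point of this refactor.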