From 040ee0b7ee40c9c2b3e43d9be807f8ad5baf5c8d Mon Sep 17 00:00:00 2001 From: jiacai2050 Date: Sat, 10 Aug 2024 23:09:26 +0800 Subject: [PATCH] !refactor: add system args --- shellgpt/api/llm.py | 15 ++++++--------- shellgpt/app.py | 19 +++++++++++++------ 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/shellgpt/api/llm.py b/shellgpt/api/llm.py index cdb1d2c..1c6dd00 100644 --- a/shellgpt/api/llm.py +++ b/shellgpt/api/llm.py @@ -37,14 +37,14 @@ def chat(self, prompt, stream=True, add_system_message=True): def chat_openai(self, prompt, stream, add_system_message): url = urljoin(self.base_url, '/v1/chat/completions') - debug_print( - f'chat: {prompt} to {url} with model {self.model} system_content {self.system_content} and stream {stream}' - ) messages, model = self.make_messages( prompt, False, add_system_message, ) + debug_print( + f'chat: {prompt} to {url} with model {self.model} system_content {self.system_content} and stream {stream}, messages: \n{messages}' + ) payload = { 'messages': messages, 'model': model, @@ -60,7 +60,7 @@ def chat_openai(self, prompt, stream, add_system_message): # https://github.com/openai/openai-python#streaming-responses # The response is SSE, so we need to parse the response line by line. 
for item in r.iter_content(chunk_size=None): - debug_print(f'\nitem: {item}\ncurrent: {current}') + # debug_print(f'\nitem: {item}\ncurrent: {current}') for msg in item.split(b'\n\n'): msg = msg.removeprefix(b'data: ') if len(msg) == 0: @@ -78,7 +78,6 @@ def chat_openai(self, prompt, stream, add_system_message): continue s = msg.decode('utf-8') - debug_print(f'\nitem to decode: {s}') if s == '[DONE]': self.messages.append({'role': 'assistant', 'content': answer}) return @@ -93,7 +92,6 @@ def chat_openai(self, prompt, stream, add_system_message): answer += msg yield msg except json.JSONDecodeError as e: - debug_print(f'Error when decode JSON: {s}, err:{e}') # this means the message is not a JSON message, so we need to continue searching next }. current = msg continue @@ -130,11 +128,11 @@ def make_messages(self, prompt, support_image, add_system_message): def chat_ollama(self, prompt, stream, add_system_message): model = self.model url = urljoin(self.base_url, '/api/chat') + messages, model = self.make_messages(prompt, True, add_system_message) debug_print( - f'chat: {prompt} to {url} with model {model} system_content {self.system_content} and stream {stream}' + f'chat: {prompt} to {url} with model {self.model} system_content {self.system_content} and stream {stream}, messages: \n{messages}' ) - messages, model = self.make_messages(prompt, True, add_system_message) payload = { 'messages': messages, 'model': model, @@ -142,7 +140,6 @@ def chat_ollama(self, prompt, stream, add_system_message): 'options': {'temperature': self.temperature}, } - debug_print(f'Infer message: {payload}') r = self.http_session.post(url, json=payload, stream=stream) if r.status_code != 200: raise Exception('Error: ' + r.text) diff --git a/shellgpt/app.py b/shellgpt/app.py index afde9e2..20d5354 100644 --- a/shellgpt/app.py +++ b/shellgpt/app.py @@ -42,7 +42,7 @@ def list_content(): # List of commands for autocompletion -commands = ['exit', 'clear', 'editor', 'set', 'copy', 'explain', 'run'] 
+commands = ['exit', 'clear', 'editor', 'set', 'copy', 'explain', 'run'] def completer(text, state): @@ -120,6 +120,9 @@ def repl_action(self, prompt): if new_prompt is not None: self.infer(new_prompt) return True + elif prompt in ['clear']: + self.llm.messages = [] + return True if self.is_shell: if prompt in ['e', 'explain']: @@ -141,6 +144,9 @@ def repl_action(self, prompt): if sub_cmd == 'model': self.llm.model = args[2] return True + elif sub_cmd == 'history': + self.llm.model = args[2]  # FIXME(review): 'set history' overwrites the model — looks copy-pasted from the 'model' branch; confirm intended attribute + return True elif sub_cmd == 'system': sc = args[2] self.is_shell = sc == 'shell' @@ -161,7 +167,8 @@ def repl(self, initial_prompt): \ \/\/ / -_) / _/ _ \ ' \/ -_) | _/ _ \ \__ \ ' \/ -_) | | (_ | _/ | | \_/\_/\___|_\__\___/_|_|_\___| \__\___/ |___/_||_\___|_|_|\___|_| |_| -Type "exit" or ctrl-d to exit; ctrl-c to stop response; "c" to copy last answer; "ed" to enter editor mode. +Type "exit" or ctrl-d to exit; ctrl-c to stop response; "c" to copy last answer; + "clear" to reset history messages; "ed" to enter editor mode. When system content is shell , type "e" to explain, "r" to run last command. """, end='', @@ -240,14 +247,14 @@ def main(): ) parser.add_argument('-t', '--tui', action='store_true', help='enter TUI mode') parser.add_argument( - '-s', + '-S', '--shell', action='store_true', help='system content set to `shell`', ) parser.add_argument( - '-c', - '--content', + '-s', + '--system', default='default', help='content for system role (default: %(default)s)', ) @@ -313,7 +320,7 @@ def main(): else: app_mode = AppMode.REPL if len(prompt) == 0 else AppMode.Direct - system_content = args.content + system_content = args.system if args.shell or app_mode == AppMode.TUI: system_content = 'shell'