Skip to content

Commit

Permalink
refactor!: add system args
Browse files Browse the repository at this point in the history
  • Loading branch information
jiacai2050 committed Aug 10, 2024
1 parent aa4e75d commit 040ee0b
Show file tree
Hide file tree
Showing 2 changed files with 19 additions and 15 deletions.
15 changes: 6 additions & 9 deletions shellgpt/api/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,14 +37,14 @@ def chat(self, prompt, stream=True, add_system_message=True):

def chat_openai(self, prompt, stream, add_system_message):
url = urljoin(self.base_url, '/v1/chat/completions')
debug_print(
f'chat: {prompt} to {url} with model {self.model} system_content {self.system_content} and stream {stream}'
)
messages, model = self.make_messages(
prompt,
False,
add_system_message,
)
debug_print(
f'chat: {prompt} to {url} with model {self.model} system_content {self.system_content} and stream {stream}, messages: \n{messages}'
)
payload = {
'messages': messages,
'model': model,
Expand All @@ -60,7 +60,7 @@ def chat_openai(self, prompt, stream, add_system_message):
# https://github.com/openai/openai-python#streaming-responses
# The response is SSE, so we need to parse the response line by line.
for item in r.iter_content(chunk_size=None):
debug_print(f'\nitem: {item}\ncurrent: {current}')
# debug_print(f'\nitem: {item}\ncurrent: {current}')
for msg in item.split(b'\n\n'):
msg = msg.removeprefix(b'data: ')
if len(msg) == 0:
Expand All @@ -78,7 +78,6 @@ def chat_openai(self, prompt, stream, add_system_message):
continue

s = msg.decode('utf-8')
debug_print(f'\nitem to decode: {s}')
if s == '[DONE]':
self.messages.append({'role': 'assistant', 'content': answer})
return
Expand All @@ -93,7 +92,6 @@ def chat_openai(self, prompt, stream, add_system_message):
answer += msg
yield msg
except json.JSONDecodeError as e:
debug_print(f'Error when decode JSON: {s}, err:{e}')
# this means the message is not a JSON message, so we need to continue searching next }.
current = msg
continue
Expand Down Expand Up @@ -130,19 +128,18 @@ def make_messages(self, prompt, support_image, add_system_message):
def chat_ollama(self, prompt, stream, add_system_message):
model = self.model
url = urljoin(self.base_url, '/api/chat')
messages, model = self.make_messages(prompt, True, add_system_message)
debug_print(
f'chat: {prompt} to {url} with model {model} system_content {self.system_content} and stream {stream}'
f'chat: {prompt} to {url} with model {self.model} system_content {self.system_content} and stream {stream}, messages: \n{messages}'
)

messages, model = self.make_messages(prompt, True, add_system_message)
payload = {
'messages': messages,
'model': model,
'stream': stream,
'options': {'temperature': self.temperature},
}

debug_print(f'Infer message: {payload}')
r = self.http_session.post(url, json=payload, stream=stream)
if r.status_code != 200:
raise Exception('Error: ' + r.text)
Expand Down
19 changes: 13 additions & 6 deletions shellgpt/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def list_content():


# List of commands for autocompletion
commands = ['exit', 'clear', 'editor', 'set', 'copy', 'explain', 'run']
commands = ['exit', 'clear', 'editor', 'set', 'copy', 'explain', 'run']


def completer(text, state):
Expand Down Expand Up @@ -120,6 +120,9 @@ def repl_action(self, prompt):
if new_prompt is not None:
self.infer(new_prompt)
return True
elif prompt in ['clear']:
self.llm.messages = []
return True

if self.is_shell:
if prompt in ['e', 'explain']:
Expand All @@ -141,6 +144,9 @@ def repl_action(self, prompt):
if sub_cmd == 'model':
self.llm.model = args[2]
return True
elif sub_cmd == 'history':
self.llm.model = args[2]
return True
elif sub_cmd == 'system':
sc = args[2]
self.is_shell = sc == 'shell'
Expand All @@ -161,7 +167,8 @@ def repl(self, initial_prompt):
\ \/\/ / -_) / _/ _ \ ' \/ -_) | _/ _ \ \__ \ ' \/ -_) | | (_ | _/ | |
\_/\_/\___|_\__\___/_|_|_\___| \__\___/ |___/_||_\___|_|_|\___|_| |_|
Type "exit" or ctrl-d to exit; ctrl-c to stop response; "c" to copy last answer; "ed" to enter editor mode.
Type "exit" or ctrl-d to exit; ctrl-c to stop response; "c" to copy last answer;
"clear" to reset history messages; "ed" to enter editor mode.
When system content is shell, type "e" to explain, "r" to run last command.
""",
end='',
Expand Down Expand Up @@ -240,14 +247,14 @@ def main():
)
parser.add_argument('-t', '--tui', action='store_true', help='enter TUI mode')
parser.add_argument(
'-s',
'-S',
'--shell',
action='store_true',
help='system content set to `shell`',
)
parser.add_argument(
'-c',
'--content',
'-s',
'--system',
default='default',
help='content for system role (default: %(default)s)',
)
Expand Down Expand Up @@ -313,7 +320,7 @@ def main():
else:
app_mode = AppMode.REPL if len(prompt) == 0 else AppMode.Direct

system_content = args.content
system_content = args.system
if args.shell or app_mode == AppMode.TUI:
system_content = 'shell'

Expand Down

0 comments on commit 040ee0b

Please sign in to comment.