fix: 'compeletion' typo (lm-sys#2847)
congchan authored and zhanghao.smooth committed Jan 26, 2024
1 parent ae7e7f7 commit b2d6396
Showing 3 changed files with 20 additions and 22 deletions.
fastchat/llm_judge/common.py (16 changes: 8 additions & 8 deletions)
@@ -160,9 +160,9 @@ def run_judge_single(question, answer, judge, ref_answer, multi_turn=False):
     conv.append_message(conv.roles[1], None)
 
     if model in ["gpt-3.5-turbo", "gpt-4"]:
-        judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
+        judgment = chat_completion_openai(model, conv, temperature=0, max_tokens=2048)
     elif model in ANTHROPIC_MODEL_LIST:
-        judgment = chat_compeletion_anthropic(
+        judgment = chat_completion_anthropic(
             model, conv, temperature=0, max_tokens=1024
         )
     else:
@@ -264,12 +264,12 @@ def run_judge_pair(question, answer_a, answer_b, judge, ref_answer, multi_turn=F
 
     if model in ["gpt-3.5-turbo", "gpt-4"]:
         conv.set_system_message(system_prompt)
-        judgment = chat_compeletion_openai(model, conv, temperature=0, max_tokens=2048)
+        judgment = chat_completion_openai(model, conv, temperature=0, max_tokens=2048)
     elif model in ANTHROPIC_MODEL_LIST:
         if system_prompt != "You are a helpful assistant.":
             user_prompt = "[Instruction]\n" + system_prompt + "\n\n" + user_prompt
             conv.messages[0][1] = user_prompt
-        judgment = chat_compeletion_anthropic(
+        judgment = chat_completion_anthropic(
             model, conv, temperature=0, max_tokens=1024
         )
     else:
@@ -400,7 +400,7 @@ def play_a_match_pair(match: MatchPair, output_file: str):
     return result
 
 
-def chat_compeletion_openai(model, conv, temperature, max_tokens, api_dict=None):
+def chat_completion_openai(model, conv, temperature, max_tokens, api_dict=None):
     if api_dict is not None:
         openai.api_base = api_dict["api_base"]
         openai.api_key = api_dict["api_key"]
@@ -424,7 +424,7 @@ def chat_compeletion_openai(model, conv, temperature, max_tokens, api_dict=None)
     return output
 
 
-def chat_compeletion_openai_azure(model, conv, temperature, max_tokens, api_dict=None):
+def chat_completion_openai_azure(model, conv, temperature, max_tokens, api_dict=None):
     openai.api_type = "azure"
     openai.api_version = "2023-07-01-preview"
     if api_dict is not None:
@@ -463,7 +463,7 @@ def chat_compeletion_openai_azure(model, conv, temperature, max_tokens, api_dict
     return output
 
 
-def chat_compeletion_anthropic(model, conv, temperature, max_tokens):
+def chat_completion_anthropic(model, conv, temperature, max_tokens):
     output = API_ERROR_OUTPUT
     for _ in range(API_MAX_RETRY):
         try:
@@ -484,7 +484,7 @@ def chat_compeletion_anthropic(model, conv, temperature, max_tokens):
     return output.strip()
 
 
-def chat_compeletion_palm(chat_state, model, conv, temperature, max_tokens):
+def chat_completion_palm(chat_state, model, conv, temperature, max_tokens):
    from fastchat.serve.api_provider import init_palm_chat
 
    assert model == "palm-2-chat-bison-001"
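For context, a minimal usage sketch of the renamed helper, mirroring the call pattern visible in run_judge_single above. It assumes fastchat is installed and an OpenAI API key is configured in the environment; the model name and prompt string are placeholders:

from fastchat.llm_judge.common import chat_completion_openai
from fastchat.model.model_adapter import get_conversation_template

# Build a two-role conversation the same way the judge code does.
conv = get_conversation_template("gpt-4")
conv.append_message(conv.roles[0], "Say hello in one word.")  # placeholder prompt
conv.append_message(conv.roles[1], None)  # empty slot for the model's reply

# Renamed helper; same signature as in common.py (api_dict stays optional).
judgment = chat_completion_openai("gpt-4", conv, temperature=0, max_tokens=2048)
print(judgment)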
fastchat/llm_judge/gen_api_answer.py (14 changes: 6 additions & 8 deletions)
@@ -16,9 +16,9 @@
 from fastchat.llm_judge.common import (
     load_questions,
     temperature_config,
-    chat_compeletion_openai,
-    chat_compeletion_anthropic,
-    chat_compeletion_palm,
+    chat_completion_openai,
+    chat_completion_anthropic,
+    chat_completion_palm,
 )
 from fastchat.llm_judge.gen_model_answer import reorg_answer_file
 from fastchat.model.model_adapter import get_conversation_template, ANTHROPIC_MODEL_LIST
@@ -50,15 +50,13 @@ def get_answer(
             conv.append_message(conv.roles[1], None)
 
             if model in ANTHROPIC_MODEL_LIST:
-                output = chat_compeletion_anthropic(
-                    model, conv, temperature, max_tokens
-                )
+                output = chat_completion_anthropic(model, conv, temperature, max_tokens)
             elif model == "palm-2-chat-bison-001":
-                chat_state, output = chat_compeletion_palm(
+                chat_state, output = chat_completion_palm(
                     chat_state, model, conv, temperature, max_tokens
                 )
             else:
-                output = chat_compeletion_openai(model, conv, temperature, max_tokens)
+                output = chat_completion_openai(model, conv, temperature, max_tokens)
 
             conv.update_last_message(output)
             turns.append(output)
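The get_answer hunk sits inside a per-turn loop; below is a condensed sketch of that multi-turn pattern, using the renamed OpenAI helper. The model name, question turns, and sampling values are placeholders:

from fastchat.llm_judge.common import chat_completion_openai
from fastchat.model.model_adapter import get_conversation_template

model = "gpt-3.5-turbo"  # placeholder model name
conv = get_conversation_template(model)
turns = []
for question_turn in ["What is 2 + 2?", "Now double it."]:  # hypothetical turns
    conv.append_message(conv.roles[0], question_turn)
    conv.append_message(conv.roles[1], None)
    output = chat_completion_openai(model, conv, temperature=0.7, max_tokens=1024)
    conv.update_last_message(output)  # fill the empty slot with the reply
    turns.append(output)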
fastchat/serve/monitor/summarize_cluster.py (12 changes: 6 additions & 6 deletions)
@@ -7,9 +7,9 @@
 import pickle
 
 from fastchat.llm_judge.common import (
-    chat_compeletion_openai,
-    chat_compeletion_openai_azure,
-    chat_compeletion_anthropic,
+    chat_completion_openai,
+    chat_completion_openai_azure,
+    chat_completion_anthropic,
 )
 from fastchat.conversation import get_conv_template
 
@@ -52,13 +52,13 @@ def truncate_string(s, l):
 
     if "azure-" in model:
         template_name = "chatgpt"
-        completion_func = chat_compeletion_openai_azure
+        completion_func = chat_completion_openai_azure
     elif "gpt" in model:
         template_name = "chatgpt"
-        completion_func = chat_compeletion_openai
+        completion_func = chat_completion_openai
     elif "claude" in model:
         template_name = "claude"
-        completion_func = chat_compeletion_anthropic
+        completion_func = chat_completion_anthropic
 
     conv = get_conv_template(template_name)
     conv.set_system_message(instruct)
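summarize_cluster.py picks the completion function by substring-matching the model name; here is a trimmed sketch of that dispatch under the renamed imports. The model string, system message, and prompt are placeholders:

from fastchat.conversation import get_conv_template
from fastchat.llm_judge.common import (
    chat_completion_anthropic,
    chat_completion_openai,
    chat_completion_openai_azure,
)

model = "gpt-4"  # placeholder; "azure-..." and "claude..." names take other branches
if "azure-" in model:
    template_name = "chatgpt"
    completion_func = chat_completion_openai_azure
elif "gpt" in model:
    template_name = "chatgpt"
    completion_func = chat_completion_openai
elif "claude" in model:
    template_name = "claude"
    completion_func = chat_completion_anthropic

conv = get_conv_template(template_name)
conv.set_system_message("You are a helpful assistant.")  # placeholder instruction
conv.append_message(conv.roles[0], "Summarize these chat topics: ...")  # placeholder
conv.append_message(conv.roles[1], None)
summary = completion_func(model, conv, temperature=0, max_tokens=512)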
