generate.py
import fire
import gradio as gr
import torch
from transformers import GenerationConfig, AutoModelForCausalLM, AutoTokenizer

from utils.prompt import get_prompt  # repo-local helper that wraps an instruction in the prompt template
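
# Usage (a sketch; python-fire maps main()'s keyword arguments to CLI flags,
# and the model id below is a placeholder for any Hugging Face Hub id or
# local checkpoint path, not a value taken from this repo):
#
#   python generate.py --base_model <hub-id-or-local-path>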


def main(
    base_model: str = "",
):
    assert base_model, "Please specify --base_model (a Hugging Face Hub id or local checkpoint path)."

    # Inputs must land on the same device as the model; fall back to CPU
    # when no GPU is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    device_map = "auto"

    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        torch_dtype=torch.float16,
        device_map=device_map,
    )
    tokenizer = AutoTokenizer.from_pretrained(base_model)

    # torch_dtype=torch.float16 already loads the weights in half precision,
    # so a separate model.half() call is redundant.
    model.eval()
    def evaluate(
        instruction,
        temperature=0.1,
        top_p=0.75,
        top_k=40,
        num_beams=4,
        max_new_tokens=128,
        **kwargs,
    ):
        # Wrap the raw instruction in the model's expected prompt template.
        prompt = get_prompt(instruction)
        inputs = tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(device)
        attention_mask = inputs["attention_mask"].to(device)

        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            # Sampling must be enabled for temperature/top_p/top_k to take
            # effect; plain beam search would silently ignore those sliders.
            do_sample=True,
            **kwargs,
        )
        generation_output = model.generate(
            input_ids=input_ids,
            # Pass the attention mask explicitly so the prompt length is
            # handled correctly and transformers does not warn about it.
            attention_mask=attention_mask,
            generation_config=generation_config,
            max_new_tokens=max_new_tokens,
        )
        # Each attention mask sums to the prompt length, so slicing from
        # there keeps only the newly generated tokens for each sequence.
        decoded = [
            tokenizer.decode(output_ids[mask.sum():], skip_special_tokens=True)
            for mask, output_ids in zip(attention_mask, generation_output)
        ]
        yield decoded[0]

    gr.Interface(
        fn=evaluate,
        inputs=[
            gr.components.Textbox(
                lines=2,
                label="Query",
                placeholder="Ask me anything",
            ),
            gr.components.Slider(
                minimum=0, maximum=1, value=0.1, label="Temperature"
            ),
            gr.components.Slider(
                minimum=0, maximum=1, value=0.75, label="Top p"
            ),
            gr.components.Slider(
                minimum=0, maximum=100, step=1, value=40, label="Top k"
            ),
            gr.components.Slider(
                minimum=1, maximum=4, step=1, value=4, label="Beams"
            ),
            gr.components.Slider(
                minimum=1, maximum=8192, step=1, value=128, label="Max tokens"
            ),
        ],
        outputs=[
            gr.components.Textbox(
                lines=15,
                label="Output",
            )
        ],
        title="Code Millenials",
        description="An instruction-finetuned model",
    ).queue().launch(server_name="0.0.0.0", share=True)


if __name__ == "__main__":
    fire.Fire(main)