Joaoffg committed: "Update space"
Commit 57d5b1c · 1 Parent(s): c7eb5fd

app.py (CHANGED)
@@ -59,61 +59,50 @@ def generate_and_tokenize_prompt(data_point):
     return tokenized_full_prompt
 
 def evaluate(instruction):
-        num_return_sequences=num_return_sequences,
-        pad_token_id = 0
-        # num_beam_groups=num_beam_groups
-    )
-
-    generate_params = {
-        "input_ids": input_ids,
-        "generation_config": generation_config,
-        "return_dict_in_generate": True,
-        "output_scores": True,
-        "max_new_tokens": max_new_tokens,
-    }
-    with torch.no_grad():
-        generation_output = model.generate(
-            input_ids=input_ids,
-            generation_config=generation_config,
-            return_dict_in_generate=True,
-            output_scores=True,
-            max_new_tokens=max_new_tokens,
-        )
+    try:
+        # Generate a response:
+        input_text = None
+        prompt = prompter.generate_prompt(instruction, input_text)
+        inputs = tokenizer(prompt, return_tensors="pt")
+        input_ids = inputs["input_ids"]
+
+        temperature = 0.2
+        top_p = 0.95
+        top_k = 25
+        num_beams = 1
+        max_new_tokens = 256
+        repetition_penalty = 2.0
+        do_sample = True
+        num_return_sequences = 1
+
+        generation_config = transformers.GenerationConfig(
+            temperature=temperature,
+            top_p=top_p,
+            top_k=top_k,
+            num_beams=num_beams,
+            repetition_penalty=repetition_penalty,
+            do_sample=do_sample,
+            min_new_tokens=32,
+            num_return_sequences=num_return_sequences,
+            pad_token_id=0
+        )
+
+        with torch.no_grad():
+            generation_output = model.generate(
+                input_ids=input_ids,
+                generation_config=generation_config,
+                return_dict_in_generate=True,
+                output_scores=True,
+                max_new_tokens=max_new_tokens,
+            )
+
+        print(f'Instruction: {instruction}')
+
+        for i, s in enumerate(generation_output.sequences):
+            output = tokenizer.decode(s, skip_special_tokens=True)
+            return prompter.get_response(output)
+    except Exception as e:
+        return str(e)
 
 # Define the Gradio interface
 interface = gr.Interface(
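
The rewritten evaluate() relies on a module-level prompter object with generate_prompt() and get_response() methods that are defined elsewhere in app.py and are not part of this hunk. A minimal sketch of what such a helper could look like, assuming an Alpaca-style instruction template (the class body below is illustrative, not the Space's actual code):

# Hypothetical stand-in for the prompter used by evaluate(); the real helper
# in this Space may use a different template or class.
class Prompter:
    TEMPLATE = (
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request.\n\n"
        "### Instruction:\n{instruction}\n\n### Response:\n"
    )

    def generate_prompt(self, instruction, input_text=None):
        # evaluate() passes input_text=None, so this minimal template ignores it.
        return self.TEMPLATE.format(instruction=instruction)

    def get_response(self, output):
        # Keep only the text generated after the response marker.
        return output.split("### Response:")[-1].strip()

prompter = Prompter()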
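
The core pattern the new body follows (build a transformers.GenerationConfig, call model.generate() under torch.no_grad(), then decode the returned sequences) can be exercised on its own. The model name below is just a small public checkpoint used to keep the sketch runnable; the Space loads its own fine-tuned model and tokenizer:

import torch
import transformers

# Example model only; replace with the model/tokenizer the Space actually loads.
model_name = "sshleifer/tiny-gpt2"
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
model = transformers.AutoModelForCausalLM.from_pretrained(model_name)

generation_config = transformers.GenerationConfig(
    temperature=0.2,
    top_p=0.95,
    top_k=25,
    num_beams=1,
    repetition_penalty=2.0,
    do_sample=True,
    min_new_tokens=8,
    pad_token_id=0,  # models without a pad token otherwise emit a warning
)

input_ids = tokenizer("Explain what a diff is.", return_tensors="pt")["input_ids"]
with torch.no_grad():
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=32,
    )

print(tokenizer.decode(generation_output.sequences[0], skip_special_tokens=True))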
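
The hunk is cut off at interface = gr.Interface(, so the actual interface arguments are not visible here. As a hedged illustration of how evaluate() is typically wired into Gradio (the input/output components, labels, title, and launch call below are assumptions, not the Space's code):

import gradio as gr

# Illustrative wiring only; the real arguments follow the truncated
# gr.Interface( call in app.py.
interface = gr.Interface(
    fn=evaluate,
    inputs=gr.Textbox(lines=2, label="Instruction"),
    outputs=gr.Textbox(label="Response"),
    title="Instruction-following demo",  # placeholder title
)

if __name__ == "__main__":
    interface.launch()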