Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -44,10 +44,10 @@ def create_response_original(input_str,
|
|
44 |
input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
|
45 |
#output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
|
46 |
output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
|
47 |
-
outputs =
|
48 |
for output_id in output_ids:
|
49 |
output = tokenizer.decode(output_id, skip_special_tokens=True)
|
50 |
-
outputs
|
51 |
return outputs
|
52 |
|
53 |
def create_response_fine_tuned(input_str):
|
@@ -98,7 +98,7 @@ interface1 = gr.Interface(fn=create_response_original,
|
|
98 |
"If is set to True, the generate function will use stochastic sampling, which means that it will randomly" +
|
99 |
" select a word from the probability distribution at each step. This results in a more diverse and creative" +
|
100 |
" output, but it might also introduce errors and inconsistencies ", value=True)
|
101 |
-
], outputs="
|
102 |
interface2 = gr.Interface(fn=create_response_fine_tuned, inputs="text", outputs="text", title="Fine Tuned")
|
103 |
demo = gr.TabbedInterface([interface1, interface2], ["Original", "Fine Tuned"])
|
104 |
# with gr.Blocks() as demo:
|
|
|
44 |
input_ids = tokenizer.encode(input_str + tokenizer.eos_token, return_tensors="pt")
|
45 |
#output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=0.2, top_p=0.9, repetition_penalty=1.5,num_return_sequences=6)
|
46 |
output_ids = fine_tuned_model.generate(input_ids,do_sample=True, max_length=100, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,num_return_sequences=num_return_sequences)
|
47 |
+
outputs = []
|
48 |
for output_id in output_ids:
|
49 |
output = tokenizer.decode(output_id, skip_special_tokens=True)
|
50 |
+
outputs.append(output)
|
51 |
return outputs
|
52 |
|
53 |
def create_response_fine_tuned(input_str):
|
|
|
98 |
"If is set to True, the generate function will use stochastic sampling, which means that it will randomly" +
|
99 |
" select a word from the probability distribution at each step. This results in a more diverse and creative" +
|
100 |
" output, but it might also introduce errors and inconsistencies ", value=True)
|
101 |
+
], outputs=[gr.Textbox(label="output response", lines=30)])
|
102 |
interface2 = gr.Interface(fn=create_response_fine_tuned, inputs="text", outputs="text", title="Fine Tuned")
|
103 |
demo = gr.TabbedInterface([interface1, interface2], ["Original", "Fine Tuned"])
|
104 |
# with gr.Blocks() as demo:
|