import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the model and tokenizer
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def concatenate_and_generate(text1, text2, temperature, top_p):
    # Join the two inputs into a single prompt and tokenize it
    concatenated_text = text1 + " " + text2
    inputs = tokenizer(concatenated_text, return_tensors="pt")
    # Generate the output with the specified temperature and top_p
    output = model.generate(
        inputs["input_ids"],
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        max_length=100,
    )
    # Decode the generated token IDs back into text
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text

# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Text Concatenation and Generation with FLAN-T5")
    gr.Markdown("Concatenate two input texts and generate an output using google/flan-t5-large. Adjust the temperature and top_p parameters for different generation behaviors.")
    text1 = gr.Textbox(lines=2, placeholder="Enter first text here...")
    text2 = gr.Textbox(lines=2, placeholder="Enter second text here...")
    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.1, label="Top-p")
    output = gr.Textbox()
    btn = gr.Button("Generate")
    btn.click(concatenate_and_generate, [text1, text2, temperature, top_p], output)

demo.launch()
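
# --- Illustrative usage (assumption; not part of the original Space code) ---
# Calling the generation helper directly, e.g. from a notebook where the model
# and tokenizer above have already been loaded, would look like this. The
# prompt text is made up, and outputs vary between runs because sampling
# (do_sample=True) is enabled:
#
#   concatenate_and_generate(
#       "Answer the question:", "What is the capital of France?",
#       temperature=0.7, top_p=0.9,
#   )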