import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the model and tokenizer
model_name = "google/flan-t5-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def concatenate_and_generate(text1, text2, temperature, top_p):
    # The source text and the target-style examples are simply concatenated
    # into a single prompt for the model.
    concatenated_text = text1 + " " + text2
    inputs = tokenizer(concatenated_text, return_tensors="pt")
    # Sample from the model with the user-specified temperature and top_p
    # (passing **inputs also forwards the attention mask).
    output = model.generate(
        **inputs,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        max_length=100,
    )
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text


# Define the Gradio interface
with gr.Blocks(theme="ParityError/Interstellar@0.0.1") as demo:
    gr.Markdown("# TinyStyler Demo")
    gr.Markdown(
        "Style-transfer the source text into the target style, given some example "
        "texts of the target style. You can adjust the temperature and top_p sliders "
        "to control the quality of the style transfer. A lower temperature generally "
        "stays closer to the source text, while a higher temperature produces more "
        "varied rewrites."
    )
    text1 = gr.Textbox(
        lines=2,
        label="Source Text",
        placeholder="Enter the source text to transform into the target style...",
    )
    text2 = gr.Textbox(
        lines=2,
        label="Target Style Examples",
        placeholder="Enter example texts of the target style (one per line)...",
    )
    temperature = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.1, label="Top-p")
    output = gr.Textbox(label="Output")
    btn = gr.Button("Generate")
    btn.click(concatenate_and_generate, [text1, text2, temperature, top_p], output)

demo.launch()
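
# Optional sanity check: a minimal sketch (not part of the demo above) showing how
# to call the generation function directly, without the UI. The example strings
# are placeholders only. Uncomment to run it, and temporarily comment out
# demo.launch() above, since launch() blocks.
#
# sample = concatenate_and_generate(
#     "i had a great time at the show last night",
#     "Verily, the evening's performance did delight me greatly.",
#     temperature=0.7,
#     top_p=0.9,
# )
# print(sample)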