"""Gradio demo: two text-generation models chained together.

Model 1 (notexist/tttf) produces a "name" plus a continuation; model 2
(notexist/ttte) produces a body for that name, with the literal token
"[TEXT]" standing in for the name itself.
"""
import gradio as gr
from transformers import AutoTokenizer, pipeline
import torch

# Special token used by both models as BOS and as the prompt prefix.
BOS = "<|endoftext|>"

tokenizer1 = AutoTokenizer.from_pretrained("notexist/tttf")
tdk1 = pipeline('text-generation', model='notexist/tttf', tokenizer=tokenizer1)
tokenizer2 = AutoTokenizer.from_pretrained("notexist/ttte")
tdk2 = pipeline('text-generation', model='notexist/ttte', tokenizer=tokenizer2)


def _generate(pipe, prompt, repetition_penalty, top_k, top_p):
    """Run one sampled generation and return the raw generated text.

    Shared by both branches of predict(); the original duplicated this
    call four times with identical keyword arguments.
    """
    return pipe(
        prompt,
        do_sample=True,
        max_length=64,
        # Gradio sliders deliver floats; generate() requires an int top_k.
        top_k=int(top_k),
        top_p=top_p,
        # Only result [0] was ever read — the original asked the second
        # model for 2 sequences and discarded one (pure wasted compute).
        num_return_sequences=1,
        repetition_penalty=repetition_penalty,
    )[0]["generated_text"]


def predict(name, sl, topk, topp):
    """Generate a name (if none given) and a matching body text.

    Args:
        name: optional seed name; "" lets model 1 invent one.
        sl:   repetition_penalty passed to both models.
        topk: top-k sampling cutoff (slider float, coerced to int).
        topp: top-p (nucleus) sampling cutoff.

    Returns:
        The model-1 continuation, optionally followed by "\\n\\n" and the
        model-2 body with "[TEXT]" replaced by the name.
    """
    if name == "":
        prefix = BOS
        x1 = _generate(tdk1, prefix, sl, topk, topp)
        head = x1[len(prefix):]
        # BUG FIX: the original used x1.index("\n\n"), which raises
        # ValueError (crashing the callback) whenever the model emits no
        # blank line. Fall back to the whole continuation as the name.
        sep = head.find("\n\n")
        new_name = head if sep == -1 else head[:sep]
    else:
        prefix = f"{BOS}{name}\n\n"
        x1 = _generate(tdk1, prefix, sl, topk, topp)
        head = x1[len(prefix):]
        new_name = name

    # Model 2 is always prompted with the (given or generated) name.
    prompt2 = f"{BOS}{new_name}\n\n"
    x2 = _generate(tdk2, prompt2, sl, topk, topp)

    if "[TEXT]" not in x2:
        # No substitution marker: the body is unusable, return model 1 only.
        return head
    body = x2[len(prompt2):].replace("[TEXT]", " " + new_name + " ")
    return head + "\n\n" + body


iface = gr.Interface(
    fn=predict,
    inputs=[
        "text",
        gr.inputs.Slider(0, 3, default=1.1, label="repetition_penalty"),
        gr.inputs.Slider(0, 100, default=75, label="top_k"),
        gr.inputs.Slider(0, 1, default=0.95, label="top_p"),
    ],
    outputs="text",
)
iface.launch()