# Scraped Hugging Face Space page chrome (kept as a comment so the file parses):
# Spaces:
# Running
# Running
import gradio as gr
import numpy as np
import pandas as pd
import torch
from transformers import pipeline

# Checkpoint used for paraphrasing; pipeline() downloads it on first run.
model = "GeneZC/MiniChat-2-3B"
generator = pipeline(task="text-generation", model=model)
# Tone presets: maps each UI dropdown value to the descriptive phrase
# interpolated into the paraphrasing prompt.
tones = {
    "natural": "human, authentic",
    "fluency": "readable, clarified",
    "formal": "sophisticated",  # fixed typo: was "sophistocated"
    "academic": "technical and scholarly",
    "simple": "simple and easily understandable",
}
def generate(text, max_length):
    """Run the text-generation pipeline on a fully formatted prompt.

    Args:
        text: Prompt string, already wrapped in the model's chat template.
        max_length: Generation budget. Forwarded as ``max_new_tokens`` so it
            matches the "Max new tokens" slider label in the UI and counts
            only generated tokens rather than prompt + output (``max_length``
            would include the prompt and could truncate long inputs).

    Returns:
        The pipeline output: a list with one dict containing "generated_text".
    """
    return generator(text, max_new_tokens=max_length, num_return_sequences=1)
def respond(message, history, tone="natural", max_length=512):
    """Paraphrase *message* in the selected tone (gr.ChatInterface callback).

    Args:
        message: User text to paraphrase.
        history: Chat history supplied by gr.ChatInterface (unused).
        tone: Key into the module-level ``tones`` presets; unknown values
            fall back to "natural" instead of raising KeyError.
        max_length: Generation budget forwarded to ``generate``.

    Returns:
        The model's paraphrase with the prompt scaffolding stripped.
    """
    style = tones.get(tone, tones["natural"])
    prompt = f"<s> [|User|]Paraphrase this text in a more {style} way: {message} </s>[|Assistant|]"
    outputs = generate(prompt, max_length)
    text = outputs[0]["generated_text"]
    # partition() is safe when the marker is missing, where the original
    # split(...)[1] would raise IndexError; fall back to the raw generation.
    _, sep, reply = text.partition("[|Assistant|]")
    return reply if sep else text
# Chat UI: the dropdown and slider values are passed to respond() as the
# extra `tone` and `max_length` arguments after (message, history).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Dropdown(
            ["natural", "fluency", "formal", "academic", "simple"],
            label="Tone",
            value="natural",
        ),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    ],
)

if __name__ == "__main__":
    demo.launch()