# t5-paraphrasing / app.py
# Author: Ceshine Lee
# Invokes the Hugging Face Inference API manually.
# (Hub metadata: commit d38d726, raw / history / blame, 1.71 kB)
import os
import json
import requests
import gradio as gr
from gradio import inputs, outputs
# Hosted Hugging Face Inference API endpoint for the paraphrasing model.
ENDPOINT = "https://api-inference.huggingface.co/models/ceshine/t5-paraphrase-quora-paws"
def paraphrase(source_text):
    """Paraphrase ``source_text`` via the hosted HF inference API.

    Posts the input to the t5-paraphrase-quora-paws model endpoint with
    beam-search generation parameters, filters out candidates identical to
    the input (case- and surrounding-whitespace-insensitive), and returns
    up to three numbered paraphrases as one display string.

    Args:
        source_text: The sentence to paraphrase.

    Returns:
        A string with up to three numbered paraphrases, each followed by a
        blank line; empty string if no distinct candidate was produced.

    Raises:
        ValueError: If the API request does not return HTTP 200.
        KeyError: If the ``TOKEN`` environment variable is unset.
    """
    res = requests.post(
        ENDPOINT,
        headers={"Authorization": f"Bearer {os.environ['TOKEN']}"},
        # `json=` serializes the payload and sets the Content-Type header,
        # replacing the manual json.dumps(...) round-trip.
        json={
            "inputs": source_text,
            "parameters": {
                # "do_sample": True,
                "num_beams": 10,
                "top_k": 5,
                "repetition_penalty": 2.0,
                "temperature": 1.5,
                "num_return_sequences": 10,
                "max_length": 200,
            },
        },
    )
    if res.status_code != 200:
        raise ValueError(
            "Could not complete request to HuggingFace API, Error {}".format(
                res.status_code
            )
        )
    results = res.json()
    print(results)  # debug trace of the raw API response, kept as-is
    reference = source_text.lower().strip()
    # Fix: strip the candidates too, so a paraphrase that differs from the
    # input only by surrounding whitespace is still filtered out.
    # Renamed local from `outputs` to avoid shadowing the gradio.outputs
    # module imported at file level.
    candidates = [
        x["generated_text"]
        for x in results
        if x["generated_text"].lower().strip() != reference
    ][:3]
    # str.join avoids quadratic string concatenation in a loop.
    return "".join(f"{i + 1}: {c}\n\n" for i, c in enumerate(candidates))
# Build the Gradio UI: one text box in, the numbered paraphrases out.
# NOTE(review): gradio.inputs / gradio.outputs are the legacy (pre-3.x)
# component namespaces — confirm the pinned gradio version before changing.
interface = gr.Interface(
    fn=paraphrase,
    inputs=inputs.Textbox(label="Input"),
    outputs=outputs.Textbox(label="Generated text:"),
    title="T5 Sentence Paraphraser",
    description="A paraphrasing model trained on PAWS and Quora datasets",
    examples=[
        ["I bought a ticket from London to New York."],
        ["Weh Seun spends 14 hours a week doing housework."],
    ],
)
# enable_queue serializes incoming requests through a queue (legacy
# launch kwarg in older gradio releases).
interface.launch(enable_queue=True)