import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
import gradio as gr

# Load the fine-tuned Kingify 2-Way model and its tokenizer from the Hugging Face Hub.
best_model_path = "swcrazyfan/Kingify-2Way-T5-Large-v1_1"
model = T5ForConditionalGeneration.from_pretrained(best_model_path)
tokenizer = T5Tokenizer.from_pretrained(best_model_path)
def tokenize_data(text, dekingify):
    # Prefix the input with the task name. The tokenizer appends the </s> EOS
    # token itself, so it is not added to the string manually here.
    if dekingify == "Dekingify":
        input_ = "dekingify: " + str(text)
    else:
        input_ = "kingify: " + str(text)

    max_len = 512
    tokenized_inputs = tokenizer(
        input_,
        padding="max_length",
        truncation=True,
        max_length=max_len,
        return_attention_mask=True,
        return_tensors="pt",
    )
    return {
        "input_ids": tokenized_inputs["input_ids"],
        "attention_mask": tokenized_inputs["attention_mask"],
    }
def generate_answers(text, dekingify):
    inputs = tokenize_data(text, dekingify)
    # Beam-search sampling: with do_sample=True and num_beams=5, tokens are
    # sampled within each of the 5 beams rather than chosen greedily.
    with torch.no_grad():
        results = model.generate(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            do_sample=True,
            num_beams=5,
            max_length=512,
            min_length=1,
            early_stopping=True,
            num_return_sequences=1,
        )
    answer = tokenizer.decode(results[0], skip_special_tokens=True)
    return answer
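
# A minimal local sanity check, assuming a hypothetical example input; the Space
# itself serves this function through the Gradio interface below:
#     print(generate_answers("Hello, how are you today?", "Kingify"))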
iface = gr.Interface(
    title="Kingify 2Way",
    description=(
        "This is a custom AI model that translates modern English into 17th-century English or "
        "'King James' English (and vice versa). Write anything below, select 'Kingify' or 'Dekingify', "
        "and click submit."
    ),
    fn=generate_answers,
    inputs=[
        gr.Textbox(label="Original Text", lines=10),
        gr.Radio(label="What do you want to do?", choices=["Kingify", "Dekingify"]),
    ],
    outputs="text",
)

iface.launch()