swcrazyfan committed on
Commit
9625079
·
1 Parent(s): e362b3f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -17
app.py CHANGED
@@ -1,22 +1,35 @@
1
- import gradio as gr
2
  import torch
3
- from transformers import T5Tokenizer, T5ForConditionalGeneration
 
 
 
 
 
4
 
5
- model = T5ForConditionalGeneration.from_pretrained('swcrazyfan/KingJamesify-T5-large')
6
- tokenizer = T5Tokenizer.from_pretrained('swcrazyfan/KingJamesify-T5-large')
 
 
 
 
 
 
 
7
 
8
def king_jamesify(input_text, num_beams, max_length, temperature):
    """Rewrite *input_text* in King James style via the fine-tuned T5 model.

    Args:
        input_text: modern-English source text.
        num_beams: beam-search width (also used as the number of beam groups).
        max_length: maximum length of the generated sequence, in tokens.
        temperature: sampling temperature. NOTE(review): with the default
            do_sample=False this value has no effect on generation — confirm
            whether sampling was intended.

    Returns:
        The decoded generated text, with special tokens stripped.
    """
    prompt_text = "kingify: " + input_text
    input_ids = tokenizer.encode(prompt_text, return_tensors='pt').to(torch.int64)
    generated_ids = model.generate(input_ids=input_ids, max_length=max_length, num_beams=num_beams, num_beam_groups=num_beams, num_return_sequences=1, temperature=temperature)
    # BUG FIX: generate() returns a batch tensor of shape
    # (num_return_sequences, seq_len); decode() expects a single sequence,
    # so take the first (and only) row instead of the whole tensor.
    result = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return result
14
 
15
# BUG FIX: the input components must appear in the same order as the
# parameters of king_jamesify(input_text, num_beams, max_length, temperature).
# Previously the order was (text, temperature, max_length, num_beams), so the
# temperature slider (e.g. 0.7) was passed as num_beams, breaking generation.
iface = gr.Interface(
    king_jamesify,
    [
        gr.inputs.Textbox(lines=20, label="Enter text to be King Jamesified"),
        gr.inputs.Slider(minimum=1, maximum=10, default=1, label="Number of Beams"),
        gr.inputs.Slider(minimum=1, maximum=512, default=512, label="Max Length"),
        gr.inputs.Slider(minimum=0.0, maximum=1, default=0.7, label="Temperature"),
    ],
    gr.outputs.Textbox(label="King Jamesified Text"),
)

iface.launch()
 
 
 
1
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
import gradio as gr

# Hugging Face Hub repo hosting the fine-tuned T5 checkpoint and its tokenizer.
best_model_path = "swcrazyfan/Dekingify-T5-Large"
model = T5ForConditionalGeneration.from_pretrained(best_model_path)
# CONSISTENCY FIX: load the tokenizer from the same variable instead of a
# duplicated string literal, so the repo name cannot drift between the two.
tokenizer = T5Tokenizer.from_pretrained(best_model_path)
8
 
9
def tokenize_data(text, dekingify, max_length=512):
    """Build the tokenized model inputs for a kingify/dekingify request.

    Args:
        text: source text to rewrite; coerced to str.
        dekingify: the exact string "Dekingify" selects the dekingify task;
            any other value (including None) falls back to kingify.
        max_length: token budget for padding/truncation. Defaults to 512,
            the value previously hard-coded as ``max_len``.

    Returns:
        dict with 'input_ids' and 'attention_mask' PyTorch tensors.
    """
    task = "dekingify" if dekingify == "Dekingify" else "kingify"
    # NOTE(review): T5's tokenizer appends its own </s>; the explicit suffix
    # is kept byte-for-byte to preserve the original prompt format.
    prompt = task + ": " + str(text) + ' </s>'
    tokenized_inputs = tokenizer(
        prompt,
        padding='max_length',
        truncation=True,
        max_length=max_length,
        return_attention_mask=True,
        return_tensors='pt',
    )
    return {
        "input_ids": tokenized_inputs['input_ids'],
        "attention_mask": tokenized_inputs['attention_mask'],
    }
 
 
 
22
 
23
def generate_answers(text, max_length, num_beams, dekingify):
    """Generate the rewritten text for the Gradio UI.

    Tokenizes *text* for the selected task (see tokenize_data), runs
    beam search with sampling enabled, and returns the single decoded
    best sequence with special tokens stripped.
    """
    encoded = tokenize_data(text, dekingify)
    output_ids = model.generate(
        input_ids=encoded['input_ids'],
        attention_mask=encoded['attention_mask'],
        do_sample=True,
        num_beams=num_beams,
        max_length=max_length,
        min_length=1,
        early_stopping=True,
        num_return_sequences=1,
    )
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
33
 
34
# UX FIX: the Radio previously had no default and an empty label, so a user
# who never touched it got the else-branch ("kingify") even though the app's
# title and description promise DeKingify. Default to "Dekingify" and give
# the control a visible label.
iface = gr.Interface(
    title="DeKingify",
    description="Write anything below. Then, click submit to 'DeKingify' it.",
    fn=generate_answers,
    inputs=[
        gr.inputs.Textbox(label="Original Text", lines=10),
        gr.inputs.Slider(label="Maximum Length", minimum=1, maximum=512, default=512, step=1),
        gr.inputs.Slider(label="Number of Beams", minimum=1, maximum=50, default=5, step=1),
        gr.inputs.Radio(label="Mode", choices=["Kingify", "Dekingify"], default="Dekingify"),
    ],
    outputs=["text"],
)
iface.launch(inline=False)