aditi2222 committed
Commit acd0faf · 1 Parent(s): 4272894

Upload app.py

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import torch
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+ import gradio as gr
+
+ def set_seed(seed):
+     torch.manual_seed(seed)
+ set_seed(42)
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ best_model_path = "aditi2222/t5-paraphrase"
+ model = T5ForConditionalGeneration.from_pretrained(best_model_path).to(device)
+ tokenizer = T5Tokenizer.from_pretrained('aditi2222/t5-paraphrase')
+
+ def tokenize_data(text):
+     # Prefix the task tag and end-of-sequence token expected by the paraphrase model
+     input_ = "paraphrase: " + str(text) + ' </s>'
+
+     max_len = 64
+     # Tokenize the input, padding/truncating to max_len
+     tokenized_inputs = tokenizer(input_, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='pt')
+
+     inputs = {"input_ids": tokenized_inputs['input_ids'],
+               "attention_mask": tokenized_inputs['attention_mask']}
+
+     return inputs
+
+ def generate_answers(text):
+     inputs = tokenize_data(text)
+     # Sample one paraphrase with top-k / nucleus sampling
+     results = model.generate(input_ids=inputs['input_ids'].to(device),
+                              attention_mask=inputs['attention_mask'].to(device),
+                              do_sample=True,
+                              max_length=64,
+                              top_k=120,
+                              top_p=0.98,
+                              early_stopping=True,
+                              num_return_sequences=1)
+     answer = tokenizer.decode(results[0], skip_special_tokens=True)
+     return answer
+
+ iface = gr.Interface(fn=generate_answers, inputs=['text'], outputs=["text"])
+ iface.launch(share=True)