eloi-goncalves committed on
Commit
21d73b8
·
1 Parent(s): 03113eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -15
app.py CHANGED
@@ -2,13 +2,13 @@ from transformers import AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, A
2
  import gradio as grad
3
  import ast
4
 
5
# Extractive QA pipeline: RoBERTa base fine-tuned on SQuAD 2.0.
mdl_name = "deepset/roberta-base-squad2"
my_pipeline = pipeline('question-answering', model=mdl_name, tokenizer=mdl_name)

# English -> Brazilian-Portuguese translation (M2M100 fine-tuned on KDE4).
# NOTE(review): model_translate is loaded but only model_translate_token and
# the pipeline name appear used elsewhere — confirm the explicit model load
# is needed (each from_pretrained call downloads weights on first run).
model_translate_name = 'danhsf/m2m100_418M-finetuned-kde4-en-to-pt_BR'
model_translate = AutoModelForSeq2SeqLM.from_pretrained(model_translate_name)
model_translate_token = AutoTokenizer.from_pretrained(model_translate_name)
translate_pipeline = pipeline('translation', model=model_translate_name)
12
 
13
  def answer_question(question,context):
14
  text= "{"+"'question': '"+question+"','context': '"+context+"'}"
@@ -16,6 +16,7 @@ def answer_question(question,context):
16
  response = my_pipeline(di)
17
  print('response', response)
18
  return response
 
19
 
20
 
21
  def translate(text):
@@ -24,21 +25,34 @@ def translate(text):
24
  response = model_translate_token(translate_output[0], skip_special_tokens=True)
25
  #response = translate_pipeline(text)
26
  return response
 
27
 
28
 
29
-
30
# Abstractive summarization model (Pegasus pretrained on XSum).
# NOTE(review): this rebinds mdl_name, shadowing the QA model name defined
# above — harmless here but confusing; a distinct name would be clearer.
mdl_name = "google/pegasus-xsum"
pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
33
 
34
def summarize(text):
    """Summarize `text` with the Pegasus model.

    Returns a list of 5 candidate summaries (num_return_sequences=5),
    decoded with special tokens stripped.
    """
    # Tokenize: truncate to the model's max length, pad to the longest item.
    tokens = pegasus_tkn(text, truncation=True, padding="longest", return_tensors="pt")
    # Beam search with 10 beams, keeping the 5 best sequences.
    # NOTE(review): temperature only affects sampling; with pure beam search
    # (no do_sample=True) temperature=1.5 is likely a no-op — confirm intent.
    txt_summary = mdl.generate(**tokens, num_return_sequences=5, max_length=200, temperature=1.5,num_beams=10)
    response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
    return response

# Gradio wiring for the summarizer: 10-line input and output text boxes.
txt=grad.Textbox(lines=10, label="English", placeholder="English Text here")
out=grad.Textbox(lines=10, label="Summary")
grad.Interface(summarize, inputs=txt, outputs=out).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
 
43
- #grad.Interface(answer_question, inputs=["text","text"], outputs="text").launch()
44
- # grad.Interface(translate, inputs=['text',], outputs='text').launch()
 
2
  import gradio as grad
3
  import ast
4
 
5
+ # mdl_name = "deepset/roberta-base-squad2"
6
+ # my_pipeline = pipeline('question-answering', model=mdl_name, tokenizer=mdl_name)
7
 
8
+ # model_translate_name = 'danhsf/m2m100_418M-finetuned-kde4-en-to-pt_BR'
9
+ # model_translate = AutoModelForSeq2SeqLM.from_pretrained(model_translate_name)
10
+ # model_translate_token = AutoTokenizer.from_pretrained(model_translate_name)
11
+ # translate_pipeline = pipeline('translation', model=model_translate_name)
12
 
13
  def answer_question(question,context):
14
  text= "{"+"'question': '"+question+"','context': '"+context+"'}"
 
16
  response = my_pipeline(di)
17
  print('response', response)
18
  return response
19
+ #grad.Interface(answer_question, inputs=["text","text"], outputs="text").launch()
20
 
21
 
22
  def translate(text):
 
25
  response = model_translate_token(translate_output[0], skip_special_tokens=True)
26
  #response = translate_pipeline(text)
27
  return response
28
+ # grad.Interface(translate, inputs=['text',], outputs='text').launch()
29
 
30
 
31
+ # mdl_name = "google/pegasus-xsum"
32
+ # pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
33
+ # mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)
 
34
 
35
def summarize(text):
    """Generate candidate summaries of `text` with the Pegasus model.

    Returns the five highest-scoring beam-search summaries as a list of
    plain strings (special tokens removed).
    """
    encoded = pegasus_tkn(
        text, truncation=True, padding="longest", return_tensors="pt"
    )
    summary_ids = mdl.generate(
        **encoded,
        num_return_sequences=5,
        max_length=200,
        temperature=1.5,
        num_beams=10,
    )
    return pegasus_tkn.batch_decode(summary_ids, skip_special_tokens=True)
40
+
41
+ # txt=grad.Textbox(lines=10, label="English", placeholder="English Text here")
42
+ # out=grad.Textbox(lines=10, label="Summary")
43
+ # grad.Interface(summarize, inputs=txt, outputs=out).launch()
44
+
45
+
46
# Third-party dependencies: HF pipeline factory and Gradio for the web UI.
from transformers import pipeline
import gradio as grad

# Zero-shot text classifier. NOTE(review): no model is pinned here, so
# transformers picks its default zero-shot checkpoint — pin a model name
# for reproducible behavior across library versions.
zero_shot_classifier = pipeline("zero-shot-classification")
49
def classify(text, labels):
    """Zero-shot classify `text` against a comma-separated `labels` string.

    Parameters:
        text: the input text to classify.
        labels: candidate labels as one comma-separated string,
            e.g. "software, politics, love, movies, emergency, advertisement, sports".

    Returns:
        The raw zero-shot pipeline response (labels ranked with scores).
    """
    # Fix: local was misspelled "classifer_labels"; also strip surrounding
    # whitespace so "politics, sports" yields "sports", not " sports".
    classifier_labels = [label.strip() for label in labels.split(",")]
    response = zero_shot_classifier(text, classifier_labels)
    return response
54
# Wire up the Gradio UI: two single-line inputs (text to classify, plus a
# comma-separated label list) feeding classify(), one output box, then launch.
grad.Interface(
    classify,
    inputs=[
        grad.Textbox(lines=1, label="English", placeholder="text to be classified"),
        grad.Textbox(lines=1, label="Labels", placeholder="comma separated labels"),
    ],
    outputs=grad.Textbox(lines=1, label="Classification"),
).launch()
58