Lautaro Cardarelli committed on
Commit dc63bd9 · 1 Parent(s): b58cd5a

upgrade gradio version

Files changed (3)
  1. README.md +1 -1
  2. app.py +7 -10
  3. requirements.txt +5 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🏃
 colorFrom: indigo
 colorTo: gray
 sdk: gradio
-sdk_version: 4.2.0
+sdk_version: 5.1.0
 app_file: app.py
 pinned: false
 ---
app.py CHANGED
@@ -1,21 +1,17 @@
-import gradio as gr
-import pandas as pd
 import torch
+import gradio as gr
 from googletrans import Translator
 from transformers import T5Tokenizer
 from transformers import T5ForConditionalGeneration
 from transformers import BartForConditionalGeneration
 from transformers import BartTokenizer
-from transformers import pipeline
+from transformers import PreTrainedModel
+from transformers import PreTrainedTokenizer
 
 tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
 model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
 
 
-
-from transformers import PreTrainedModel
-from transformers import PreTrainedTokenizer
-
 # Question launcher
 class E2EQGPipeline:
     def __init__(
@@ -24,7 +20,7 @@ class E2EQGPipeline:
         tokenizer: PreTrainedTokenizer
     ):
 
-        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
         self.model = model
         self.tokenizer = tokenizer
@@ -100,9 +96,10 @@ def generate_summary(text):
 
 
 def process(text):
-    return generate_summary(text), generate_questions(text)
+    print(generate_questions(text))
+    return generate_summary(text)
 
 
 textbox = gr.Textbox(label="Pega el text aca:", placeholder="Texto...", lines=15)
-demo = gr.Interface(fn=process, inputs=textbox, outputs=["text", "text"])
+demo = gr.Interface(fn=process, inputs=textbox, outputs="text")
 demo.launch()
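For context on the app.py change: `gr.Interface` maps each value returned by `fn` to one output component, so once `process` stops returning the questions (they are only printed to the logs now), the `outputs` spec has to shrink from `["text", "text"]` to a single `"text"`. Below is a minimal runnable sketch of the updated wiring under Gradio 5.x; the summarization body is a plausible stand-in, not the app's exact `generate_summary`:

```python
import gradio as gr
import torch
from transformers import BartForConditionalGeneration, BartTokenizer

# Same device-selection pattern the diff shows inside E2EQGPipeline.__init__.
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(device)

def generate_summary(text: str) -> str:
    # Stand-in for the app's summarizer: plain beam-search generation with BART.
    inputs = tokenizer(text, truncation=True, max_length=1024, return_tensors="pt").to(device)
    summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=150)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

def process(text):
    # One returned value -> one "text" output component.
    return generate_summary(text)

textbox = gr.Textbox(label="Pega el text aca:", placeholder="Texto...", lines=15)
demo = gr.Interface(fn=process, inputs=textbox, outputs="text")

if __name__ == "__main__":
    demo.launch()
```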
requirements.txt CHANGED
@@ -1,5 +1,8 @@
-gradio==4.2.0
+gradio==5.1.0
 transformers
 torch
 accelerate
-googletrans-py
+# We are using this fork since the original google library uses an old version of the httpx package
+# which is not compatible with the latest version of gradio
+googletrans-py
+sentencepiece
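The comment above is the interesting part of this file: stock `googletrans` pins a years-old `httpx` release that conflicts with the much newer `httpx` Gradio 5.x requires, hence the `googletrans-py` fork; `sentencepiece` is presumably pulled in for `T5Tokenizer`, which loads a SentencePiece model. A minimal sketch of the usage app.py relies on, assuming the fork keeps the classic synchronous `Translator` API under the same `googletrans` import path:

```python
from googletrans import Translator  # resolved from the googletrans-py fork

translator = Translator()
# Example: translate Spanish input to English before feeding English-only models.
result = translator.translate("Pega el texto acá", src="es", dest="en")
print(result.text)
```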