Spaces:
Runtime error
Runtime error
rambocoder
committed on
Commit
Β·
04a6116
1
Parent(s):
3ecd79f
Add summarization
Browse files- .gitignore +1 -0
- README.md +6 -0
- app.py +16 -5
- app_hello.py +7 -0
- app_questions.py +17 -0
- requirements.txt +4 -0
.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
.conda
|
README.md
CHANGED
@@ -11,3 +11,9 @@ license: apache-2.0
|
|
11 |
---
|
12 |
|
13 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
---
|
12 |
|
13 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
## T1000

Activate the conda environment with `conda activate $PWD/.conda`; deactivate it with `conda deactivate`.

Install dependencies with `pip install -r requirements.txt`.
|
app.py
CHANGED
@@ -1,7 +1,18 @@
|
|
1 |
-
import
|
|
|
|
|
|
|
|
|
2 |
|
3 |
-
def greet(name):
|
4 |
-
return "Hello " + name + "!!"
|
5 |
|
6 |
-
|
7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio demo: one-sentence abstractive summarization with Pegasus-XSum."""
from transformers import PegasusForConditionalGeneration, PegasusTokenizer
import gradio as grad

# Pegasus fine-tuned on XSum, which targets extreme (single-sentence) summaries.
mdl_name = "google/pegasus-xsum"
pegasus_tkn = PegasusTokenizer.from_pretrained(mdl_name)
mdl = PegasusForConditionalGeneration.from_pretrained(mdl_name)


def summarize(text):
    """Return a one-sentence summary of *text* as a plain string.

    Parameters:
        text: English source text to summarize.

    Returns:
        The decoded summary string (special tokens stripped).
    """
    # Truncate over-long inputs to the model's max length; padding="longest"
    # keeps the (single-item) batch rectangular for generate().
    tokens = pegasus_tkn(text, truncation=True,
                         padding="longest", return_tensors="pt")
    txt_summary = mdl.generate(**tokens)
    response = pegasus_tkn.batch_decode(txt_summary, skip_special_tokens=True)
    # batch_decode returns a list (one entry per batch item); unwrap the single
    # summary so the output Textbox shows text rather than a list repr.
    return response[0]


txt = grad.Textbox(lines=10, label="English", placeholder="English Text here")
out = grad.Textbox(lines=10, label="Summary")
grad.Interface(summarize, inputs=txt, outputs=out).launch()
|
app_hello.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Minimal Gradio hello-world demo."""
import gradio as gr


def greet(name):
    """Return the demo's greeting for *name*."""
    return "Hello " + name + "!!"


# Wire the text box straight into greet() and serve the app.
demo = gr.Interface(fn=greet, inputs="text", outputs="text")
demo.launch()
|
app_questions.py
ADDED
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio demo: extractive question answering with RoBERTa (SQuAD 2.0)."""
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as grad

# Extractive QA model fine-tuned on SQuAD 2.0 (supports unanswerable questions).
mdl_name = "deepset/roberta-base-squad2"
my_pipeline = pipeline('question-answering',
                       model=mdl_name, tokenizer=mdl_name)


def answer_question(question, context):
    """Answer *question* from *context* using the QA pipeline.

    Parameters:
        question: the question to answer.
        context: the passage the answer should be extracted from.

    Returns:
        The pipeline's result dict (answer text, score, start/end offsets).
    """
    # Pass a real dict to the pipeline. The previous version built a dict
    # literal by string concatenation and ast.literal_eval'd it, which raised
    # on any input containing a quote character (e.g. "don't").
    response = my_pipeline({'question': question, 'context': context})
    return response


grad.Interface(answer_question, inputs=["text", "text"],
               outputs="text").launch()
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
gradio
transformers
torch
transformers[sentencepiece]
|