Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,16 @@
|
|
1 |
import gradio as gr
|
|
|
2 |
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
|
3 |
|
4 |
# Load model and tokenizer explicitly
|
5 |
model_name = "facebook/mbart-large-50"
|
6 |
tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="ne_NP")
|
7 |
-
model = AutoModelForSeq2SeqLM.from_pretrained(
|
|
|
|
|
|
|
|
|
8 |
|
9 |
-
# Create pipeline with explicit config
|
10 |
summarizer = pipeline(
|
11 |
"summarization",
|
12 |
model=model,
|
@@ -15,16 +19,30 @@ summarizer = pipeline(
|
|
15 |
|
16 |
def summarize_text(text):
|
17 |
try:
|
|
|
|
|
|
|
18 |
summary = summarizer(
|
19 |
text,
|
20 |
max_length=150,
|
21 |
min_length=30,
|
22 |
truncation=True,
|
23 |
-
# No src_lang here anymore
|
24 |
generate_kwargs={"forced_bos_token_id": tokenizer.lang_code_to_id["ne_NP"]}
|
25 |
)[0]['summary_text']
|
26 |
return summary
|
27 |
except Exception as e:
|
28 |
return f"Error during summarization: {str(e)}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
|
30 |
-
|
|
|
|
1 |
import gradio as gr
import torch
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

# Load the multilingual mBART checkpoint explicitly (rather than letting
# pipeline() do it) so the source language can be fixed to Nepali at
# tokenizer construction time.
model_name = "facebook/mbart-large-50"
tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="ne_NP")
model = AutoModelForSeq2SeqLM.from_pretrained(
    model_name,
    device_map="auto",       # place weights on GPU automatically when one exists
    low_cpu_mem_usage=True,  # stream weights in to reduce peak RAM during load
)
|
13 |
|
|
|
14 |
summarizer = pipeline(
|
15 |
"summarization",
|
16 |
model=model,
|
|
|
19 |
|
20 |
def summarize_text(text):
    """Summarize Nepali text using the module-level mBART summarization pipeline.

    Args:
        text: Raw input from the Gradio textbox. May be ``None`` or
            whitespace-only when the user submits an empty box.

    Returns:
        The generated summary string, a prompt asking for input when the
        box is empty, or an error message string if generation fails.
    """
    try:
        # Gradio can deliver None (cleared textbox) as well as "" — guard
        # None before calling .strip(), otherwise this raised AttributeError
        # and the user saw a confusing generic error instead of the prompt.
        if not text or not text.strip():
            return "Please enter some Nepali text to summarize"
        summary = summarizer(
            text,
            max_length=150,
            min_length=30,
            truncation=True,
            # mBART-50 must be told which language to generate: force the
            # Nepali language token as the first decoded token.
            generate_kwargs={"forced_bos_token_id": tokenizer.lang_code_to_id["ne_NP"]}
        )[0]['summary_text']
        return summary
    except Exception as e:
        # Surface the failure in the UI rather than crashing the Space.
        return f"Error during summarization: {str(e)}"
    finally:
        # Release cached GPU memory between requests (no-op when CUDA is
        # unavailable or uninitialized).
        torch.cuda.empty_cache()
|
37 |
+
|
38 |
+
# Wire the summarizer into a simple one-in / one-out web UI.
iface = gr.Interface(
    fn=summarize_text,
    inputs=gr.Textbox(lines=5, label="Nepali Text to Summarize"),
    outputs=gr.Textbox(lines=5, label="Summary"),
    title="Nepali Text Summarizer",
    description="Enter Nepali text and get a concise summary using multilingual NLP models.",
    allow_flagging="never",
)
|
46 |
|
47 |
+
if __name__ == "__main__":
    # Bind to all interfaces on 7860 — the standard HF Spaces port.
    iface.launch(server_name="0.0.0.0", server_port=7860)
|