Update app.py
app.py (changed)
@@ -1,7 +1,6 @@
 from pptx import Presentation
 import re
 from transformers import pipeline
-import subprocess
 import gradio as gr
 
 def extract_text_from_pptx(file_path):
@@ -22,7 +21,7 @@ def predict_pptx_content(file_path):
         cleaned_text = re.sub(r'\s+', ' ', extracted_text)
 
         classifier = pipeline("text-classification", model="Ahmed235/roberta_classification")
-        #summarizer = pipeline("summarization", model="Falconsai/text_summarization")
+        # summarizer = pipeline("summarization", model="Falconsai/text_summarization")
 
         result = classifier(cleaned_text)[0]
         predicted_label = result['label']
@@ -31,29 +30,14 @@ def predict_pptx_content(file_path):
         prediction = {
             "Predicted Label": predicted_label,
             "Evaluation": f"Evaluate the topic according to {predicted_label} is: {predicted_probability}"
-            #"Summary": summarizer(cleaned_text, max_length=80, min_length=30, do_sample=False)
+            # "Summary": summarizer(cleaned_text, max_length=80, min_length=30, do_sample=False)
         }
 
         return prediction
     except Exception as e:
-
-
-
-
-        classifier = pipeline("text-classification", model="Ahmed235/roberta_classification")
-        #summarizer = pipeline("summarization", model="Falconsai/text_summarization")
-
-        result = classifier(cleaned_text)[0]
-        predicted_label = result['label']
-        predicted_probability = result['score']
-
-        prediction = {
-            "Predicted Label": predicted_label,
-            "Evaluation": f"Evaluate the topic according to {predicted_label} is: {predicted_probability}"
-            #"Summary": summarizer(cleaned_text, max_length=80, min_length=30, do_sample=False)
-        }
-
-        return prediction
+        error_message = str(e)
+        print(f"Error processing file: {error_message}")
+        return {"error": error_message}
 
 # Define the Gradio interface
 iface = gr.Interface(
@@ -65,4 +49,4 @@ iface = gr.Interface(
 )
 
 # Deploy the Gradio interface
-iface.launch(share=True)
+iface.launch(share=True)
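
For readers unfamiliar with the transformers pipeline API, the indexing in `classifier(cleaned_text)[0]` and the `result['label']` / `result['score']` lookups rely on the standard output shape of a text-classification pipeline: a list with one dict per input. A minimal check (the model name is taken from the diff; the input string is only an illustration):

```python
from transformers import pipeline

# A text-classification pipeline returns a list of dicts, one per input,
# each holding a 'label' and a 'score' key -- the shape app.py indexes into.
classifier = pipeline("text-classification", model="Ahmed235/roberta_classification")

result = classifier("Slides introducing convolutional neural networks")[0]
print(result["label"], result["score"])
```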
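
The summarization step stays commented out in this commit. A sketch of how it could plug back into the prediction dict if re-enabled, where `cleaned_text` is a placeholder for the whitespace-normalized slide text and `'summary_text'` is the standard output field of a summarization pipeline:

```python
from transformers import pipeline

# Sketch only: what the commented-out "Summary" field could look like if re-enabled.
summarizer = pipeline("summarization", model="Falconsai/text_summarization")

cleaned_text = "Placeholder text extracted from a .pptx deck and whitespace-normalized."
summary = summarizer(cleaned_text, max_length=80, min_length=30, do_sample=False)

# Summarization pipelines return a list of dicts with a 'summary_text' key.
prediction = {"Summary": summary[0]["summary_text"]}
print(prediction)
```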