import gradio as gr
from transformers import pipeline
from pptx import Presentation # Import the Presentation class
# Create a text classification pipeline
classifier = pipeline(
    "text-classification",
    model="Ahmed235/roberta_classification",
    tokenizer="Ahmed235/roberta_classification",
)
def extract_text_from_pptx(file_path):
    """Collect the text from every shape on every slide of a .pptx file."""
    presentation = Presentation(file_path)
    text = []
    for slide in presentation.slides:
        for shape in slide.shapes:
            # Only some shapes (text boxes, placeholders, etc.) carry text
            if hasattr(shape, "text"):
                text.append(shape.text)
    return "\n".join(text)
def predict_pptx_content(file_path):
    """Classify the text extracted from the uploaded .pptx file."""
    try:
        extracted_text = extract_text_from_pptx(file_path)
        # Perform inference using the pipeline; truncate long decks to the model's max input length
        result = classifier(extracted_text, truncation=True)
        predicted_label = result[0]["label"]
        predicted_probability = result[0]["score"]
        return {
            "Evaluation": f"Predicted topic: {predicted_label} (confidence: {predicted_probability:.4f})",
        }
    except Exception as e:
        # Log the error details and surface them in the UI
        print(f"Error in predict_pptx_content: {e}")
        return {"error": str(e)}
# Define the Gradio interface
iface = gr.Interface(
    fn=predict_pptx_content,
    inputs=gr.File(type="filepath", label="Upload PowerPoint (.pptx) file"),
    outputs=gr.JSON(label="Prediction"),  # the function returns a dict
    live=False,  # run only when the user submits, not on every input change
    title="<h1 style='color: lightgreen; text-align: center;'>HackTalk Analyzer</h1>",
)
# Deploy the Gradio interface
iface.launch(share=True)