Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,97 +1,86 @@
|
|
|
|
|
|
1 |
import gradio as gr
|
2 |
from transformers import pipeline
|
3 |
|
4 |
-
#
|
5 |
-
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
-
def
|
9 |
-
return
|
10 |
|
11 |
-
def
|
12 |
-
|
13 |
-
|
|
|
14 |
|
15 |
-
def
|
16 |
-
return
|
17 |
|
18 |
-
def
|
19 |
-
|
|
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
return qa_model(context=context, question=question)["answer"]
|
25 |
|
26 |
-
def
|
27 |
-
|
28 |
-
|
|
|
29 |
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
|
35 |
-
def process_generation(prompt):
    """Generate a text continuation of *prompt* (up to 50 tokens)."""
    model = load_generator_model()
    output = model(prompt, max_length=50)
    return output[0]["generated_text"]
|
38 |
|
39 |
-
def process_summarization(text):
    """Summarize *text* deterministically, between 40 and 150 tokens."""
    model = load_summarizer_model()
    output = model(text, max_length=150, min_length=40, do_sample=False)
    return output[0]["summary_text"]
|
42 |
|
43 |
-
# Gradio Interface
|
44 |
with gr.Blocks() as demo:
|
45 |
-
gr.Markdown("Choose an NLP task and input the required text.")
|
46 |
-
|
47 |
with gr.Tab("Single Models"):
|
48 |
-
gr.
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
if task == "Translation":
|
88 |
-
translation = process_translation(input_text, "nl") # Default to Dutch translation
|
89 |
-
print("Translation result (Multi-model):", translation)
|
90 |
-
output_text_multi.update(translation)
|
91 |
-
else:
|
92 |
-
output_text_multi.update(eval(f"process_{task.lower()}")(input_text))
|
93 |
-
print("Output updated (Multi-model)")
|
94 |
-
|
95 |
-
execute_button_multi.click(execute_task_multi)
|
96 |
-
|
97 |
-
demo.launch()
|
|
|
1 |
+
|
2 |
+
|
3 |
import gradio as gr
|
4 |
from transformers import pipeline
|
5 |
|
6 |
+
# pipelines — all models are constructed eagerly at import time, so first
# startup downloads every checkpoint before the UI becomes available.
qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
classification_pipeline = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# English -> French translation (opus-mt-en-fr).
translation_pipeline = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
# NOTE(review): despite the variable name, distilbert-...-sst-2 is a binary
# sentiment model (POSITIVE/NEGATIVE), not a topic classifier — confirm the
# intended checkpoint. The UI labels this section "Sentiment Analysis".
topic_classification_pipeline = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english") # Fine-tuned model for topic classification
summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")
|
12 |
+
|
13 |
+
#functions
|
14 |
|
15 |
+
def answer_question(context, question):
    """Run extractive QA over *context* and return the answer span text."""
    result = qa_pipeline(question=question, context=context)
    return result["answer"]
|
17 |
|
18 |
+
def classify_text(text, labels):
    """Zero-shot classify *text* against a comma-separated *labels* string.

    Returns a dict mapping each candidate label to its probability,
    rounded to 4 decimal places (suitable for a gr.JSON output).
    """
    # Strip whitespace around each label and drop empty entries, so user
    # input like "sports, politics," behaves the same as "sports,politics".
    candidate_labels = [label.strip() for label in labels.split(",") if label.strip()]
    results = classification_pipeline(text, candidate_labels=candidate_labels)
    # round() replaces the original float(f"{prob:.4f}") format round-trip.
    return {label: round(prob, 4) for label, prob in zip(results["labels"], results["scores"])}
|
22 |
|
23 |
+
def translate_text(text):
    """Translate English *text* to French; empty input yields a placeholder."""
    if not text:
        return "No translation available"
    return translation_pipeline(text)[0]['translation_text']
|
25 |
|
26 |
+
def classify_topic(text):
    """Classify *text* and return "LABEL: score" pairs joined by ", ".

    NOTE(review): the underlying checkpoint is an SST-2 sentiment model,
    so labels are POSITIVE/NEGATIVE — confirm this is intended.
    """
    predictions = topic_classification_pipeline(text)
    formatted = [f"{item['label']}: {item['score']:.4f}" for item in predictions]
    return ", ".join(formatted)
|
29 |
|
30 |
+
def summarize_text(text):
    """Summarize *text* (at most 60 tokens); placeholder if nothing comes back."""
    output = summarization_pipeline(text, max_length=60)
    if not output:
        return "No summary available"
    return output[0]['summary_text']
|
|
|
33 |
|
34 |
+
def multi_model_interaction(text):
    """Chain two models: summarize *text* in English, then translate the
    summary to French.

    Returns a dict with both versions; the gr.Text output renders it via str().
    """
    english_summary = summarize_text(text)
    french_summary = translate_text(english_summary)
    return {
        "Summary (English)": english_summary,
        "Summary (French)": french_summary,
    }
|
43 |
|
|
|
|
|
|
|
44 |
|
|
|
|
|
|
|
45 |
|
|
|
46 |
# Gradio UI: one tab of independent single-model demos, one tab chaining
# summarization + translation. Each Button.click wires one handler above.
with gr.Blocks() as demo:
    with gr.Tab("Single Models"):
        # Extractive question answering (context + question -> answer span).
        with gr.Column():
            gr.Markdown("### Question Answering")
            context = gr.Textbox(label="Context")
            question = gr.Textbox(label="Question")
            answer_output = gr.Text(label="Answer")
            gr.Button("Answer").click(answer_question, inputs=[context, question], outputs=answer_output)

        # Zero-shot classification; labels are a comma-separated string.
        with gr.Column():
            gr.Markdown("### Zero-Shot Classification")
            text_zsc = gr.Textbox(label="Text")
            labels = gr.Textbox(label="Labels (comma separated)")
            classification_result = gr.JSON(label="Classification Results")
            gr.Button("Classify").click(classify_text, inputs=[text_zsc, labels], outputs=classification_result)

        # English -> French translation.
        with gr.Column():
            gr.Markdown("### Translation")
            text_to_translate = gr.Textbox(label="Text")
            translated_text = gr.Text(label="Translated Text")
            gr.Button("Translate").click(translate_text, inputs=[text_to_translate], outputs=translated_text)

        # Sentiment section is backed by classify_topic (SST-2 model).
        with gr.Column():
            gr.Markdown("### Sentiment Analysis")
            text_for_sentiment = gr.Textbox(label="Text for Sentiment Analysis")
            sentiment_result = gr.Text(label="Sentiment")
            gr.Button("Classify Sentiment").click(classify_topic, inputs=[text_for_sentiment], outputs=sentiment_result)

        # Abstractive summarization (<= 60 tokens, see summarize_text).
        with gr.Column():
            gr.Markdown("### Summarization")
            text_to_summarize = gr.Textbox(label="Text")
            summary = gr.Text(label="Summary")
            gr.Button("Summarize").click(summarize_text, inputs=[text_to_summarize], outputs=summary)

    # Chained pipeline: summarize, then translate the summary.
    with gr.Tab("Multi-Model"):
        gr.Markdown("### Multi-Model")
        input_text = gr.Textbox(label="Enter Text for Multi-Model Analysis")
        multi_output = gr.Text(label="Results")
        gr.Button("Process").click(multi_model_interaction, inputs=[input_text], outputs=multi_output)

# NOTE(review): debug=True blocks the main thread and share=True is ignored
# (with a warning) when hosted on Hugging Face Spaces — confirm these flags
# are wanted for the deployment target.
demo.launch(share=True, debug=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|