emielclopterop committed on
Commit
07db68b
·
verified ·
1 Parent(s): b2d589e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -82
app.py CHANGED
@@ -1,97 +1,86 @@
 
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # Lazy loading: Define functions to load models only when needed
5
- def load_qa_model():
6
- return pipeline("question-answering", model="bert-large-uncased-whole-word-masking-finetuned-squad")
 
 
 
 
 
7
 
8
- def load_classifier_model():
9
- return pipeline("zero-shot-classification", model="MoritzLaurer/deberta-v3-base-zeroshot-v1.1-all-33")
10
 
11
- def load_translator_model(target_language):
12
- model_name = f"translation_en_to_{target_language}"
13
- return pipeline("translation_en_to_nl", model=model_name)
 
14
 
15
- def load_generator_model():
16
- return pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B", tokenizer="EleutherAI/gpt-neo-2.7B")
17
 
18
- def load_summarizer_model():
19
- return pipeline("summarization", model="facebook/bart-large-cnn")
 
20
 
21
- # Define functions to process inputs
22
- def process_qa(context, question):
23
- qa_model = load_qa_model()
24
- return qa_model(context=context, question=question)["answer"]
25
 
26
- def process_classifier(text, labels):
27
- classifier_model = load_classifier_model()
28
- return classifier_model(text, labels)["labels"][0]
 
29
 
30
- def process_translation(text, target_language):
31
- translator_model = load_translator_model(target_language)
32
- translation = translator_model(text)[0]["translation_text"]
33
- return translation
34
 
35
- def process_generation(prompt):
36
- generator_model = load_generator_model()
37
- return generator_model(prompt, max_length=50)[0]["generated_text"]
38
 
39
- def process_summarization(text):
40
- summarizer_model = load_summarizer_model()
41
- return summarizer_model(text, max_length=150, min_length=40, do_sample=False)[0]["summary_text"]
42
 
43
- # Gradio Interface
44
  with gr.Blocks() as demo:
45
- gr.Markdown("Choose an NLP task and input the required text.")
46
-
47
  with gr.Tab("Single Models"):
48
- gr.Markdown("This tab is for single models demonstration.")
49
- # Single models interface
50
- task_select_single = gr.Dropdown(["Question Answering", "Zero-Shot Classification", "Translation", "Text Generation", "Summarization"], label="Select Task")
51
- input_fields_single = [gr.Textbox(label="Input")]
52
- if task_select_single.value == "Zero-Shot Classification":
53
- input_fields_single.append(gr.CheckboxGroup(["Label 1", "Label 2", "Label 3"], label="Labels"))
54
- elif task_select_single.value == "Translation":
55
- input_fields_single.append(gr.Dropdown(["nl", "fr", "es", "de"], label="Target Language"))
56
- output_text_single = gr.Textbox(label="Output")
57
-
58
- execute_button_single = gr.Button("Execute")
59
-
60
- def execute_task_single():
61
- task = task_select_single.value
62
- inputs = [field.value for field in input_fields_single]
63
- print("Inputs (Single Models):", inputs)
64
- if task == "Translation":
65
- translation = process_translation(*inputs)
66
- print("Translation result (Single Models):", translation)
67
- output_text_single.update(translation)
68
- else:
69
- output_text_single.update(eval(f"process_{task.lower()}")(*inputs))
70
- print("Output updated (Single Models)")
71
-
72
- execute_button_single.click(execute_task_single)
73
-
74
- with gr.Tab("Multi-model"):
75
- gr.Markdown("This tab is for multi-model demonstration.")
76
- # Multi-model interface
77
- task_select_multi = gr.Dropdown(["Question Answering", "Zero-Shot Classification", "Translation", "Text Generation", "Summarization"], label="Select Task")
78
- input_text_multi = gr.Textbox(label="Input")
79
- output_text_multi = gr.Textbox(label="Output")
80
-
81
- execute_button_multi = gr.Button("Execute")
82
-
83
- def execute_task_multi():
84
- task = task_select_multi.value
85
- input_text = input_text_multi.value
86
- print("Input (Multi-model):", input_text)
87
- if task == "Translation":
88
- translation = process_translation(input_text, "nl") # Default to Dutch translation
89
- print("Translation result (Multi-model):", translation)
90
- output_text_multi.update(translation)
91
- else:
92
- output_text_multi.update(eval(f"process_{task.lower()}")(input_text))
93
- print("Output updated (Multi-model)")
94
-
95
- execute_button_multi.click(execute_task_multi)
96
-
97
- demo.launch()
 
1
+
2
+
3
  import gradio as gr
4
  from transformers import pipeline
5
 
6
+ #pipelines
7
+ qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2")
8
+ classification_pipeline = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
9
+ translation_pipeline = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
10
+ topic_classification_pipeline = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english") # Fine-tuned model for topic classification
11
+ summarization_pipeline = pipeline("summarization", model="facebook/bart-large-cnn")
12
+
13
+ #functions
14
 
15
+ def answer_question(context, question):
16
+ return qa_pipeline(question=question, context=context)["answer"]
17
 
18
+ def classify_text(text, labels):
19
+ labels = labels.split(",")
20
+ results = classification_pipeline(text, candidate_labels=labels)
21
+ return {label: float(f"{prob:.4f}") for label, prob in zip(results["labels"], results["scores"])}
22
 
23
+ def translate_text(text):
24
+ return translation_pipeline(text)[0]['translation_text'] if text else "No translation available"
25
 
26
+ def classify_topic(text):
27
+ results = topic_classification_pipeline(text)
28
+ return ", ".join([f"{result['label']}: {result['score']:.4f}" for result in results])
29
 
30
+ def summarize_text(text):
31
+ result = summarization_pipeline(text, max_length=60)
32
+ return result[0]['summary_text'] if result else "No summary available"
 
33
 
34
+ def multi_model_interaction(text):
35
+
36
+ summary = summarize_text(text)
37
+ translated_summary = translate_text(summary)
38
 
39
+ return {
40
+ "Summary (English)": summary,
41
+ "Summary (French)": translated_summary,
42
+ }
43
 
 
 
 
44
 
 
 
 
45
 
 
46
  with gr.Blocks() as demo:
 
 
47
  with gr.Tab("Single Models"):
48
+ with gr.Column():
49
+ gr.Markdown("### Question Answering")
50
+ context = gr.Textbox(label="Context")
51
+ question = gr.Textbox(label="Question")
52
+ answer_output = gr.Text(label="Answer")
53
+ gr.Button("Answer").click(answer_question, inputs=[context, question], outputs=answer_output)
54
+
55
+ with gr.Column():
56
+ gr.Markdown("### Zero-Shot Classification")
57
+ text_zsc = gr.Textbox(label="Text")
58
+ labels = gr.Textbox(label="Labels (comma separated)")
59
+ classification_result = gr.JSON(label="Classification Results")
60
+ gr.Button("Classify").click(classify_text, inputs=[text_zsc, labels], outputs=classification_result)
61
+
62
+ with gr.Column():
63
+ gr.Markdown("### Translation")
64
+ text_to_translate = gr.Textbox(label="Text")
65
+ translated_text = gr.Text(label="Translated Text")
66
+ gr.Button("Translate").click(translate_text, inputs=[text_to_translate], outputs=translated_text)
67
+
68
+ with gr.Column():
69
+ gr.Markdown("### Sentiment Analysis")
70
+ text_for_sentiment = gr.Textbox(label="Text for Sentiment Analysis")
71
+ sentiment_result = gr.Text(label="Sentiment")
72
+ gr.Button("Classify Sentiment").click(classify_topic, inputs=[text_for_sentiment], outputs=sentiment_result)
73
+
74
+ with gr.Column():
75
+ gr.Markdown("### Summarization")
76
+ text_to_summarize = gr.Textbox(label="Text")
77
+ summary = gr.Text(label="Summary")
78
+ gr.Button("Summarize").click(summarize_text, inputs=[text_to_summarize], outputs=summary)
79
+
80
+ with gr.Tab("Multi-Model"):
81
+ gr.Markdown("### Multi-Model")
82
+ input_text = gr.Textbox(label="Enter Text for Multi-Model Analysis")
83
+ multi_output = gr.Text(label="Results")
84
+ gr.Button("Process").click(multi_model_interaction, inputs=[input_text], outputs=multi_output)
85
+
86
+ demo.launch(share=True, debug=True)