hema1 committed on
Commit
f8b370e
·
1 Parent(s): 331a00e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +117 -0
app.py CHANGED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import tensorflow as tf
import gradio as gr
# importing necessary libraries
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

# Extractive question-answering model: BERT-large (whole-word masking)
# fine-tuned on SQuAD, loaded as a TensorFlow model.
# NOTE(review): this runs at import time and downloads weights on first start.
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad",return_dict=False)
from transformers import pipeline

# Text QA pipeline built from the tokenizer/model above; used by the
# startup smoke test and the "Text QA" Gradio tab.
nlp = pipeline("question-answering", model=model, tokenizer=tokenizer)
# Startup smoke test: run one hard-coded (context, question) pair through
# the QA pipeline and echo the result to the logs.
context = "My name is Hema Raikhola, i am a data scientist and machine learning engineer."
question = "what is my profession?"

answer = nlp(question=question, context=context)["answer"]

print(f"QUESTION: {question}")
print(f"ANSWER: {answer}")
# Gradio callback for the "Text QA" tab.
def func(context, question):
    """Return the answer span extracted from `context` for `question`."""
    return nlp(question=question, context=context)["answer"]
# Example (context, question) pairs shown as clickable examples in the
# "Text QA" tab.
example_1 = "(1) Kanisha,Preeti,Hema and Shaksham are the team members.They are working on a science project"
qst_1 = "who are the team members?"

example_2 = "(2) Natural Language Processing (NLP) allows machines to break down and interpret human language. It's at the core of tools we use every day – from translation software, chatbots, spam filters, and search engines, to grammar correction software, voice assistants, and social media monitoring tools."
qst_2 = "What is NLP used for?"
from transformers import ViltProcessor, ViltForQuestionAnswering

# ViLT visual-question-answering checkpoint used by the "Visual QA" tab.
_VQA_CHECKPOINT = "dandelin/vilt-b32-finetuned-vqa"

# Lazily-loaded singletons. The original code rebuilt (and potentially
# re-downloaded) both the processor and the model on EVERY call, making
# each request needlessly slow; they are stateless, so load them once.
_vqa_processor = None
_vqa_model = None


def _get_vqa_components():
    """Load the ViLT processor/model on first use and return the pair."""
    global _vqa_processor, _vqa_model
    if _vqa_model is None:
        _vqa_processor = ViltProcessor.from_pretrained(_VQA_CHECKPOINT)
        _vqa_model = ViltForQuestionAnswering.from_pretrained(_VQA_CHECKPOINT)
    return _vqa_processor, _vqa_model


def getResult(query, image):
    """Answer a natural-language question about an image.

    Parameters
    ----------
    query : str
        The question to ask about the image.
    image : PIL.Image.Image
        Image supplied by the ``gr.Image(type="pil")`` input component.

    Returns
    -------
    str
        The highest-scoring answer label from the ViLT VQA head.
    """
    processor, model = _get_vqa_components()

    # prepare inputs (image + question text)
    encoding = processor(image, query, return_tensors="pt")

    # forward pass; the predicted answer is the argmax over the answer vocabulary
    outputs = model(**encoding)
    idx = outputs.logits.argmax(-1).item()
    answer = model.config.id2label[idx]
    print("Predicted answer:", answer)
    return answer
# --- YouTube video summarization tab ---

# NOTE(review): `pipeline` and `gradio` were already imported near the top of
# the file; these re-imports are harmless but redundant.
from transformers import pipeline
from youtube_transcript_api import YouTubeTranscriptApi
import gradio as gr
63
+ def summarize(Youtube_Video_Link):
64
+ video_id = Youtube_Video_Link.split("=")[1]
65
+ try:
66
+ transcript = YouTubeTranscriptApi.get_transcript(video_id)
67
+ summarizer = pipeline('summarization',model='facebook/bart-large-cnn')
68
+ input_text = ""
69
+ for i in transcript:
70
+ input_text += ' ' + i['text']
71
+ num_iters = int(len(input_text)/1000)
72
+ summarized_text = []
73
+ for i in range(0, num_iters + 1):
74
+ start = 0
75
+ start = i * 1000
76
+ end = (i + 1) * 1000
77
+ print("input text \n" + input_text[start:end])
78
+ out = summarizer(input_text[start:end])
79
+ out = out[0]
80
+ out = out['summary_text']
81
+ print("Summarized text\n"+out)
82
+ summarized_text.append(out)
83
+ output_text=' '.join(summarized_text)
84
+ return output_text
85
+ except:
86
+ return "Some Error has occurred either with Video link passed is invalid or No Captions present for this video"
# UI copy and example links for the video-summarization tab.
title = "YouTube Live Video Summarization"
examples = [("https://www.youtube.com/watch?v=zKvd1JwJ4Po"),("https://www.youtube.com/watch?v=9izcbNYmP8M"),]
description = "Get YouTube Video Summarization. Just Enter the YouTube Video link below. Make sure Video has Captions and it is not very long as Model Computation time will Increase."
# Single textbox input; the first positional argument is the default value shown.
Youtube_Video_Link = gr.Textbox("Input YouTube Link here (Note: This will take time if passed a long video)", show_label=False)
App= gr.Interface(fn=summarize, inputs=Youtube_Video_Link, outputs="text", examples=examples,description=description, title=title,)
# "Visual QA" tab: a free-text question plus an uploaded PIL image in,
# the predicted answer label out.
visual_qa_inputs = ["text", gr.Image(type="pil")]
iface = gr.Interface(fn=getResult, inputs=visual_qa_inputs, outputs="text")
# "Text QA" tab: context + question in, extracted answer span out.
# Dropped the original `Timeout=120` keyword: `gr.Interface` has no such
# parameter, so it was silently ignored (or rejected, depending on the
# gradio version).
app = gr.Interface(
    fn=func,
    inputs=['textbox', 'text'],
    outputs=gr.Textbox(lines=10),
    title='Question Answering bot',
    description='Input context and question, then get answers!',
    examples=[[example_1, qst_1],
              [example_2, qst_2]],
    theme="darkhuggingface",
    allow_flagging="manual",
    flagging_options=["incorrect", "ambiguous", "offensive", "other"],
).queue()

# launching the app: three tabs sharing one authenticated Blocks app.
# SECURITY NOTE(review): auth credentials are hard-coded in source; move them
# to environment variables / Space secrets before deploying publicly.
gr.TabbedInterface(
    [iface, app, App],
    ["Visual QA", "Text QA", "Video Summarization"],
).launch(auth=('user', 'hema'), auth_message="Check your Login details sent to your email")