Update app.py
app.py CHANGED
@@ -11,6 +11,12 @@ import os
 from transformers import pipeline
 from transformers.pipelines.audio_utils import ffmpeg_read
 
+from langchain.prompts import PromptTemplate
+from langchain.vectorstores import FAISS
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain_community import
+from langchain_text_splitters import SentenceTransformersTokenTextSplitter
+
 from PIL import Image
 
 from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration, TextIteratorStreamer
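This hunk only pulls in the LangChain pieces; none of them are used further down in the diff, and the "from langchain_community import" line is committed incomplete. As a minimal sketch of where the token-aware splitter usually fits, assuming the text comes from the Whisper transcription produced elsewhere in app.py:

from langchain_text_splitters import SentenceTransformersTokenTextSplitter

# Hypothetical transcript; in this app it would come from the Whisper ASR pipeline.
transcript = "... long transcription text ..."

splitter = SentenceTransformersTokenTextSplitter(
    model_name="sentence-transformers/all-mpnet-base-v2",  # same model used for the embeddings below
    tokens_per_chunk=256,
    chunk_overlap=32,
)
chunks = splitter.split_text(transcript)  # list of chunks sized to the embedding model's token limit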
@@ -18,6 +24,8 @@ processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-h
 model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
 model.to("cuda:0")
 
+embeddings_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
+
 ASR_MODEL_NAME = "openai/whisper-large-v3"
 ASR_BATCH_SIZE = 8
 ASR_CHUNK_LENGTH_S = 30
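The commit stops at instantiating embeddings_model; indexing and retrieval are not wired up yet. A sketch of the usual continuation with the FAISS and PromptTemplate imports added above (assumed, not part of this commit):

from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS

chunks = ["chunk one ...", "chunk two ..."]         # e.g. output of the sentence-transformers splitter
index = FAISS.from_texts(chunks, embeddings_model)  # embed and index the transcript chunks

question = "What was agreed in the meeting?"        # hypothetical user question
hits = index.similarity_search(question, k=3)       # top-k most similar chunks

prompt = PromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)
llm_input = prompt.format(
    context="\n".join(doc.page_content for doc in hits),
    question=question,
)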
@@ -39,7 +47,7 @@ asr_pl = pipeline(
 )
 
 application_title = "Enlight Innovations Limited -- Demo"
-application_description = "This demo is desgined to illustrate our basic
+application_description = "This demo is desgined to illustrate our basic ideas and feasibility in implementation."
 
 @spaces.GPU
 def respond(
@@ -113,6 +121,7 @@ transcribe_interface = gr.Interface(
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
 chatbot_main = gr.Chatbot(label="Extraction Output")
+chatbot_main_input = gr.MultimodalTextbox({"text": "Choose the referred material(s) and ask your question.", "files":[]})
 chatbot_sys_output = gr.Textbox(value="You are a friendly Chatbot.", label="System Message")
 chatbot_max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max. New Tokens")
 chatbot_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
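The first positional argument of gr.MultimodalTextbox is its initial value, so as committed the box opens pre-filled with that sentence and an empty file list. If the sentence is meant only as a hint, the placeholder form is the more common choice; a small sketch under that assumption:

import gradio as gr

# Assumed intent: show the sentence as a hint instead of pre-filled text.
chatbot_main_input = gr.MultimodalTextbox(
    placeholder="Choose the referred material(s) and ask your question.",
    label="Question",
)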
@@ -126,9 +135,11 @@ chatbot_top_p = gr.Slider(
 
 chat_interface = gr.ChatInterface(
     respond,
+    multimodal=True,
     title=application_title,
     description=application_description,
     chatbot=chatbot_main,
+    textbox=chatbot_main_input,
     additional_inputs=[
         chatbot_sys_output,
         chatbot_max_tokens,
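With multimodal=True and the custom textbox, ChatInterface passes respond a dict rather than a plain string. A minimal sketch of the message shape (the real respond body is not in this diff, and the parameter names after history are assumed from the additional_inputs above):

def respond(message, history, system_message, max_tokens, temperature, top_p):
    question = message["text"]  # the typed question
    files = message["files"]    # list of uploaded file paths (documents, images, audio, ...)
    return f"Received {len(files)} file(s) and the question: {question}"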
@@ -156,6 +167,10 @@ with gr.Blocks() as demo:
         outputs=task_output
     )
 
+    def on_selected_tab(selected_tab):
+        print(f"Selected tab: {selected_tab['value']}, Selected state: {selected_tab['selected']}")
+    demo.select(on_selected_tab)
+
 
 if __name__ == "__main__":
     demo.queue().launch() #demo.launch()
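The new handler indexes its argument like a dict; in current Gradio a select listener more commonly receives a gr.SelectData object through a type-annotated parameter, and the listener is typically attached to the Tab itself. A sketch of that variant (an assumption, not what the commit does):

import gradio as gr

def on_selected_tab(evt: gr.SelectData):
    # evt.value is the tab label, evt.index its position, evt.selected whether it is now active
    print(f"Selected tab: {evt.value}, index: {evt.index}, selected: {evt.selected}")

with gr.Blocks() as demo:
    with gr.Tab("Transcribe") as transcribe_tab:
        ...
    transcribe_tab.select(on_selected_tab)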
|