jaimik69 and zszazi committed
Commit 63bd9c2 · 1 parent: b0afde6

Gradio App Jupyter Notebook (#3)


- Gradio App Jupyter Notebook (a1653963617fdaec319ba87468810585b315883a)


Co-authored-by: Sai <[email protected]>

Files changed (1)
  1. DL_Project_Final.ipynb +181 -0
DL_Project_Final.ipynb ADDED
@@ -0,0 +1,181 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "source": [
+ "!pip install -q gradio_client gradio torch torchaudio transformers"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "CA8lr7CjQnU5",
+ "outputId": "1a2dfb9c-8e3d-4177-cc19-e17ceb6da284"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m16.5/16.5 MB\u001b[0m \u001b[31m89.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.9/92.9 kB\u001b[0m \u001b[31m13.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m138.7/138.7 kB\u001b[0m \u001b[31m17.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m381.9/381.9 kB\u001b[0m \u001b[31m24.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.7/45.7 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m59.7/59.7 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.1/2.1 MB\u001b[0m \u001b[31m76.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.0/67.0 kB\u001b[0m \u001b[31m9.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
+ "\u001b[?25h Building wheel for ffmpy (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
+ "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
+ "lida 0.0.10 requires kaleido, which is not installed.\n",
+ "llmx 0.0.15a0 requires cohere, which is not installed.\n",
+ "llmx 0.0.15a0 requires openai, which is not installed.\n",
+ "llmx 0.0.15a0 requires tiktoken, which is not installed.\n",
+ "tensorflow-probability 0.22.0 requires typing-extensions<4.6.0, but you have typing-extensions 4.8.0 which is incompatible.\u001b[0m\u001b[31m\n",
+ "\u001b[0m"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import gradio as gr\n",
+ "from transformers import pipeline\n",
+ "import numpy as np\n",
+ "import requests\n",
+ "from PIL import Image\n",
+ "from transformers import BlipProcessor, BlipForQuestionAnswering\n",
+ "\n",
+ "transcriber = pipeline(\"automatic-speech-recognition\", model=\"openai/whisper-base.en\")\n",
+ "\n",
+ "processor = BlipProcessor.from_pretrained(\"jaimik69/blip_finetuned\")\n",
+ "model = BlipForQuestionAnswering.from_pretrained(\"jaimik69/blip_finetuned\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "ExsvuGVbQpHK",
+ "outputId": "98d83b01-4cd0-4507-bc93-942e998a6c85"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import torch\n",
+ "\n",
+ "def transcribe(audio, img_url):\n",
+ "    sr, y = audio\n",
+ "    y = y.astype(np.float32)\n",
+ "    y /= np.max(np.abs(y))\n",
+ "    prompt = transcriber({\"sampling_rate\": sr, \"raw\": y})[\"text\"]\n",
+ "    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')\n",
+ "    inputs = processor(raw_image, text=prompt, return_tensors=\"pt\")\n",
+ "    generated_ids = model.generate(**inputs, max_new_tokens=10)\n",
+ "    aqa_ans = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n",
+ "\n",
+ "    return prompt, aqa_ans, gr.Image(img_url)\n",
+ "\n",
+ "demo = gr.Interface(\n",
+ "    transcribe,\n",
+ "    [gr.Audio(sources=[\"microphone\"], label = \"User Audio Input\"), gr.Textbox(label=\"User Image URL\")],\n",
+ "    [gr.Textbox(label=\"Question\"), gr.Textbox(label=\"Answer\"), gr.Image(label = \"User Image\")],\n",
+ "    title = 'Vox Helios', theme = 'dark-grass',\n",
+ "    description = 'An Audio Question Answering Project'\n",
+ ")\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    demo.launch(debug=True, auth=(\"sai\", \"letmein\"))"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 719
+ },
+ "id": "V0vKhLvCQyUj",
+ "outputId": "9ce5a8c8-26aa-4dd7-caba-56583799f6a0"
+ },
+ "execution_count": null,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stderr",
+ "text": [
+ "/usr/local/lib/python3.10/dist-packages/gradio/blocks.py:528: UserWarning: Cannot load dark-grass. Caught Exception: The space dark-grass does not exist\n",
+ "  warnings.warn(f\"Cannot load {theme}. Caught Exception: {str(e)}\")\n"
+ ]
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
+ "\n",
+ "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n",
+ "Running on public URL: https://90c36f56b801550f7e.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ],
+ "text/html": [
+ "<div><iframe src=\"https://90c36f56b801550f7e.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ]
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Keyboard interruption in main thread... closing server.\n",
+ "Killing tunnel 127.0.0.1:7861 <> https://90c36f56b801550f7e.gradio.live\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [],
+ "metadata": {
+ "id": "8nnM-fTOhVex"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }