Commit c7bfded
Parent(s): bd4b418

remove commented code and utilization details added

Files changed:
- .gitignore +1 -0
- app.py +26 -93
- requirements.txt +2 -1
.gitignore
CHANGED
@@ -1,2 +1,3 @@
 flagged
 path
+__pycache__
app.py
CHANGED
@@ -1,89 +1,3 @@
-# import gradio as gr
-# from transformers import pipeline, AutoModelForImageSegmentation
-# from gradio_imageslider import ImageSlider
-# import torch
-# from torchvision import transforms
-# import spaces
-# from PIL import Image
-
-# import numpy as np
-# import time
-
-# birefnet = AutoModelForImageSegmentation.from_pretrained(
-#     "ZhengPeng7/BiRefNet", trust_remote_code=True
-# )
-# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-# print("Using device:", device)
-
-# birefnet.to(device)
-# transform_image = transforms.Compose(
-#     [
-#         transforms.Resize((1024, 1024)),
-#         transforms.ToTensor(),
-#         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
-#     ]
-# )
-
-# # @spaces.GPU
-# # def PreProcess(image):
-# #     size = image.size
-# #     image = transform_image(image).unsqueeze(0).to(device)
-
-# #     with torch.no_grad():
-# #         preds = birefnet(image)[-1].sigmoid().cpu()
-# #     pred = preds[0].squeeze()
-# #     pred = transforms.ToPILImage()(pred)
-# #     mask = pred.resize(size)
-# #     # image.putalpha(mask)
-# #     return image
-
-# @spaces.GPU
-# def PreProcess(image):
-#     size = image.size  # Save original size
-#     image_tensor = transform_image(image).unsqueeze(0).to(device)  # Transform the image into a tensor
-
-#     with torch.no_grad():
-#         preds = birefnet(image_tensor)[-1].sigmoid().cpu()  # Get predictions
-#     pred = preds[0].squeeze()
-
-#     # Convert the prediction tensor to a PIL image
-#     pred_pil = transforms.ToPILImage()(pred)
-
-#     # Resize the mask to match the original image size
-#     mask = pred_pil.resize(size)
-
-#     # Convert the original image (passed as input) to a PIL image
-#     image_pil = image.convert("RGBA")  # Ensure the image has an alpha channel
-
-#     # Apply the alpha mask to the image
-#     image_pil.putalpha(mask)
-
-#     return image_pil
-
-# def segment_image(image):
-#     start = time.time()
-#     image = Image.fromarray(image)
-#     image = image.convert("RGB")
-#     org = image.copy()
-#     image = PreProcess(image)
-#     time_taken = np.round((time.time() - start), 2)
-#     return (image, org), time_taken
-
-# slider = ImageSlider(label='birefnet', type="pil")
-# image = gr.Image(label="Upload an Image")
-
-# butterfly = Image.open("butterfly.png")
-# Dog = Image.open('Dog.jpg')
-
-# time_taken = gr.Textbox(label="Time taken", type="text")
-
-# demo = gr.Interface(
-#     segment_image, inputs=image, outputs=[slider, time_taken], examples=[butterfly, Dog], api_name="BiRefNet")
-
-# if __name__ == '__main__':
-#     demo.launch()
-
 import requests
 import gradio as gr
 import tempfile
@@ -91,6 +5,7 @@ import os
 from transformers import pipeline
 from huggingface_hub import InferenceClient
 import time
+import psutil
 # import torch
 # import numpy as np
 
@@ -104,6 +19,9 @@ pipe = pipeline("automatic-speech-recognition", model=model_id)  # , device=device
 def transcribe(inputs, use_api):
     start = time.time()
     API_STATUS = ''
+
+    memory_before = psutil.Process(os.getpid()).memory_info().rss
+
     if inputs is None:
         raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
 
@@ -120,15 +38,24 @@
         res = pipe(inputs, chunk_length_s=30)["text"]
 
         end = time.time() - start
-
-        #
+
+        # Measure memory after running the transcription process
+        memory_after = psutil.Process(os.getpid()).memory_info().rss
+
+        # Calculate the difference to see how much memory was used by the code
+        memory_used = memory_after - memory_before  # Memory used in bytes
+        memory_used_gb = round(memory_used / (1024 ** 3), 2)  # Convert memory used to GB
+        total_memory_gb = round(psutil.virtual_memory().total / (1024 ** 3), 2)  # Total RAM in GB
+
+        # Calculate the percentage of RAM used by this process
+        memory_used_percent = round((memory_used / psutil.virtual_memory().total) * 100, 2)
+
+        return res, API_STATUS + str(round(end, 2)) + ' seconds', f"RAM Used by code: {memory_used_gb} GB ({memory_used_percent}%) Total RAM: {total_memory_gb}"
+
 
     except Exception as e:
         return fr'Error: {str(e)}', None
 
-def calculate_time_taken(start_time):
-    return time.time() - start_time
-
 demo = gr.Blocks()
 
 mf_transcribe = gr.Interface(
@@ -138,7 +65,10 @@ mf_transcribe = gr.Interface(
         # gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
         gr.Checkbox(label="Use API", value=False)
     ],
-    outputs=[
+    outputs=[gr.Textbox(label="Transcribed Text", type="text"),
+             gr.Textbox(label="Time taken", type="text"),
+             gr.Textbox(label="Utilization", type="text")
+             ],  # Placeholder for transcribed text and time taken
     title="Welcome to QuickTranscribe",
     description=(
         "Transcribe long-form microphone or audio inputs with the click of a button!"
@@ -153,7 +83,10 @@ file_transcribe = gr.Interface(
         # gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
         gr.Checkbox(label="Use API", value=False)  # Checkbox for API usage
     ],
-    outputs=[
+    outputs=[gr.Textbox(label="Transcribed Text", type="text"),
+             gr.Textbox(label="Time taken", type="text"),
+             gr.Textbox(label="Utilization", type="text")
+             ],  # Placeholder for transcribed text and time taken
     title="Welcome to QuickTranscribe",
     description=(
         "Transcribe long-form microphone or audio inputs with the click of a button!"
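
The utilization readout added above samples the process's resident set size (RSS) with psutil before and after the pipeline call and reports the delta. A minimal standalone sketch of the same pattern, where `work` is a hypothetical callable standing in for the transcription call:

import os
import psutil

def measure_rss_delta(work):
    # Snapshot resident memory, run the workload, snapshot again
    proc = psutil.Process(os.getpid())
    rss_before = proc.memory_info().rss      # bytes resident before the call
    result = work()
    rss_after = proc.memory_info().rss       # bytes resident after the call
    used = rss_after - rss_before            # delta in bytes
    total = psutil.virtual_memory().total    # total system RAM in bytes
    report = (f"RAM used: {round(used / 1024 ** 3, 2)} GB "
              f"({round(used / total * 100, 2)}%) of {round(total / 1024 ** 3, 2)} GB")
    return result, report

# Hypothetical usage: allocate roughly 100 MB and observe the delta
_, report = measure_rss_delta(lambda: bytearray(100 * 1024 * 1024))
print(report)

Worth keeping in mind that an RSS delta is only a rough proxy: allocator caching, lazy model loading, and garbage collection can make it under- or over-state what a single call actually consumed.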
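Both Interface blocks now declare three Textbox outputs, so Gradio expects the wrapped function to return a matching 3-tuple on every path (note that the except branch above still returns only two values). A minimal sketch of the shape, with a hypothetical fake_transcribe standing in for the real pipeline:

import gradio as gr

def fake_transcribe(audio_path, use_api):
    # Stand-in for the real ASR call; every return path yields three
    # values, one per declared output component
    if audio_path is None:
        return "Error: no audio submitted", "", ""
    return "transcribed text goes here", "1.23 seconds", "RAM Used by code: 0.5 GB"

demo = gr.Interface(
    fn=fake_transcribe,
    inputs=[gr.Audio(type="filepath"), gr.Checkbox(label="Use API", value=False)],
    outputs=[
        gr.Textbox(label="Transcribed Text", type="text"),
        gr.Textbox(label="Time taken", type="text"),
        gr.Textbox(label="Utilization", type="text"),
    ],
    title="Welcome to QuickTranscribe",
)

if __name__ == "__main__":
    demo.launch()
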
requirements.txt
CHANGED
@@ -4,4 +4,5 @@ requests
 huggingface_hub
 pytest
 gradio
-ffmpeg
+ffmpeg
+psutil
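
One caveat on the dependency list: the PyPI package named ffmpeg does not ship the ffmpeg binary itself, and the transformers speech pipeline decodes audio files by shelling out to the system ffmpeg. A hypothetical startup check along these lines can surface the problem early:

import shutil

# The ASR pipeline decodes audio via the ffmpeg binary on PATH;
# "pip install ffmpeg" alone does not provide it (on Hugging Face
# Spaces the usual route is listing ffmpeg in packages.txt).
if shutil.which("ffmpeg") is None:
    raise RuntimeError("ffmpeg binary not found on PATH")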