demavior committed
Commit 4836f74 · verified · 1 Parent(s): fe186c8

Update app.py

Files changed (1): app.py (+21, -9)
app.py CHANGED
@@ -1,18 +1,30 @@
 import gradio as gr
 from transformers import pipeline
 
-pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
 
-def predict(input_img):
-    predictions = pipeline(input_img)
-    return input_img, {p["label"]: p["score"] for p in predictions}
 
-gradio_app = gr.Interface(
-    predict,
-    inputs=gr.Image(label="Select hot dog candidate", sources=['upload', 'webcam'], type="pil"),
-    outputs=[gr.Image(label="Processed Image"), gr.Label(label="Result", num_top_classes=2)],
-    title="Hot Dog? Or Not?",
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+pipe = pipeline(
+    "automatic-speech-recognition",
+    model="openai/whisper-small",
+    chunk_length_s=30,
+    device=device,
 )
 
+ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+sample = ds[0]["audio"]
+
+prediction = pipe(sample.copy(), batch_size=8)["text"]
+" Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."
+
+# we can also return timestamps for the predictions
+prediction = pipe(sample.copy(), batch_size=8, return_timestamps=True)["chunks"]
+[{'text': ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.',
+  'timestamp': (0.0, 5.44)}]
+
+
+
+
 if __name__ == "__main__":
     gradio_app.launch()
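Note that, as committed, the new app.py does not run on its own: it uses torch and load_dataset without importing them (the latter comes from the datasets library), the ds / sample / prediction lines read like a pasted chunked-ASR example complete with hard-coded expected transcriptions, and the closing gradio_app.launch() still refers to the gr.Interface that this commit removes. Below is a minimal sketch of what a working version might look like, assuming the goal is a Gradio demo around the Whisper pipeline; the transcribe helper and the interface wiring are illustrative additions, not part of the commit.

import gradio as gr
import torch
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Same pipeline configuration as in the commit: chunked long-form
# transcription with Whisper small.
pipe = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
    chunk_length_s=30,
    device=device,
)

def transcribe(audio_path):
    # Hypothetical helper, not in the commit: gr.Audio with type="filepath"
    # passes a file path, which the ASR pipeline accepts directly
    # (ffmpeg must be available for decoding).
    return pipe(audio_path, batch_size=8, return_timestamps=True)["text"]

gradio_app = gr.Interface(
    transcribe,
    inputs=gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio to transcribe"),
    outputs=gr.Textbox(label="Transcription"),
    title="Whisper Small Speech-to-Text",
)

if __name__ == "__main__":
    gradio_app.launch()

If the librispeech sanity check from the commit is kept, it additionally needs "from datasets import load_dataset"; the string and list literals after the pipe(...) calls are only the expected outputs and can be dropped or turned into assertions.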