mgutierrez committed on
Commit
8aa0da7
·
1 Parent(s): a1d2c19

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -40
app.py CHANGED
@@ -1,65 +1,53 @@
1
- '''Imports'''
2
  import tensorflow as tf
3
- #import requests
4
  from transformers import pipeline
5
- import gradio as gr
6
 
7
- '''Config inception'''
8
  inception_net = tf.keras.applications.MobileNetV2()
9
-
10
- '''Making request and set database'''
11
- #response = requests.get("https://git.io/JJkYN")
12
- #tags = response.text.split("\n")
13
-
14
- '''Define model and classify pipelines'''
15
- trans = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")
16
- classify = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")
17
-
18
- '''Define functions for demo'''
19
- def classify_image(inp):
20
  inp = inp.reshape((-1, 224, 224, 3))
21
  inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
22
  prediction = inception_net.predict(inp).reshape(1,1000)
23
  pred_scores = tf.keras.applications.mobilenet_v2.decode_predictions(prediction, top=100)
24
- #prediction = inception_net.predict(inp).flatten()
25
- confidences = {f'{pred_scores[0][i][1]}': float(pred_scores[0][i][2]) for i in range(100)}
26
- #confidences = {tags[i]: float(prediction[i]) for i in range(1000)}
27
- return confidences
28
 
29
- def audio_to_text(audio):
 
30
  text = trans(audio)["text"]
31
  return text
 
 
 
 
32
 
33
- def text_to_sentiment(text):
34
- return classify(text)[0]["label"]
35
 
36
- '''Define blocks for demo'''
37
  demo = gr.Blocks()
38
 
39
- '''Making demo'''
40
  with demo:
41
- gr.Markdown("Second Demo with Blocks")
42
  with gr.Tabs():
43
- with gr.TabItem("Transcript audio in spanish"):
 
44
  with gr.Row():
45
- audio = gr.Audio(source="microphone", type="filepath")
46
- transcription = gr.Textbox()
47
- button1 = gr.Button("Please transcript")
48
 
49
- with gr.TabItem("Sentiment analisys"):
50
  with gr.Row():
51
- text = gr.Textbox()
52
  label = gr.Label()
53
- button2 = gr.Button("Please sentiment")
54
 
55
- with gr.TabItem("Image classify"):
 
 
 
56
  with gr.Row():
57
- image = gr.Image(shape=(224,224))
58
- labelImage = gr.Label(num_top_classes=3)
59
- button3 = gr.Button("Please classify Image")
60
-
61
- button1.click(audio_to_text, inputs=audio, outputs=transcription)
62
- button2.click(text_to_sentiment, inputs=text, outputs=label)
63
- button3.click(classify_image, inputs=image, outputs=labelImage)
64
 
65
  demo.launch()
 
1
+ import gradio as gr
2
  import tensorflow as tf
 
3
  from transformers import pipeline
 
4
 
 
5
# MobileNetV2 pretrained on ImageNet, loaded once at import time so every
# request reuses the same weights instead of rebuilding the model.
inception_net = tf.keras.applications.MobileNetV2()


def classify_imagen(inp, top=100):
    """Classify an image with MobileNetV2 and return per-class confidences.

    Args:
        inp: image as a numpy array; it is reshaped to (1, 224, 224, 3), so it
            is assumed to already be a 224x224 RGB image — TODO confirm the
            Gradio Image component upstream guarantees this size.
        top: number of highest-scoring ImageNet classes to include in the
            result (default 100, matching the original hard-coded behavior).

    Returns:
        dict mapping human-readable class name -> confidence as a float,
        the format expected by gr.Label.
    """
    inp = inp.reshape((-1, 224, 224, 3))
    inp = tf.keras.applications.mobilenet_v2.preprocess_input(inp)
    # Model emits 1000 ImageNet logits; flatten the batch dimension away.
    prediction = inception_net.predict(inp).reshape(1, 1000)
    pred_scores = tf.keras.applications.mobilenet_v2.decode_predictions(prediction, top=top)
    # decode_predictions yields (class_id, class_name, score) triples.
    # Unpacking them directly keeps the dict size in lockstep with `top`
    # (the original indexed with a second hard-coded range(100)).
    return {name: float(score) for _, name, score in pred_scores[0]}
 
 
13
 
14
# Speech-to-text pipeline: wav2vec2 fine-tuned for Spanish.
trans = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-large-xlsr-53-spanish")


def audio2text(audio):
    """Transcribe an audio recording (filepath) into Spanish text."""
    result = trans(audio)
    return result["text"]
18
+
19
# Sentiment classifier for Spanish text (robertuito).
classificator = pipeline("text-classification", model="pysentimiento/robertuito-sentiment-analysis")


def text2sentiment(text):
    """Return the top sentiment label for *text*.

    NOTE(review): label set is whatever the robertuito model emits
    (presumably POS/NEG/NEU) — confirm against the model card.
    """
    predictions = classificator(text)
    return predictions[0]['label']
22
 
 
 
23
 
 
24
# --- Gradio UI: three tabs wired to the model functions defined above. ---
demo = gr.Blocks()

with demo:
    gr.Markdown("Este es el segundo demo con Blocks hecho por Rafa")
    with gr.Tabs():

        # Tab 1: Spanish speech transcription (microphone -> text).
        with gr.TabItem("Transcribe Audio en español"):
            with gr.Row():
                audio = gr.Audio(source='microphone', type='filepath')
                transcript = gr.Textbox()
            b1 = gr.Button("Transcribe")

        # Tab 2: sentiment analysis over free text.
        with gr.TabItem("Analisis de sentimientos"):
            with gr.Row():
                texto = gr.Textbox()
                label = gr.Label()
            b2 = gr.Button("Sentimientos")

        b1.click(audio2text, inputs=audio, outputs=transcript)
        b2.click(text2sentiment, inputs=texto, outputs=label)

        # Tab 3: image classification. Its output component gets its own
        # name (label_image) instead of reusing `label`, so it no longer
        # shadows the sentiment tab's component.
        with gr.TabItem("Clasificador de imagenes"):
            with gr.Row():
                image = gr.Image(shape=(224, 224))
                label_image = gr.Label(num_top_classes=3)
            bimage = gr.Button("Clasificar")

            bimage.click(classify_imagen, inputs=image, outputs=label_image)

demo.launch()