DHEIVER committed on
Commit
f8e5757
·
verified ·
1 Parent(s): 621d541

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -15
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqGeneration
3
  import torch
4
  import warnings
5
  warnings.filterwarnings('ignore')
@@ -11,6 +11,9 @@ try:
11
  'transcription': pipeline("automatic-speech-recognition",
12
  model="openai/whisper-small",
13
  device=device),
 
 
 
14
  'summarization': pipeline("summarization",
15
  model="facebook/bart-large-cnn",
16
  device=device),
@@ -25,10 +28,6 @@ try:
25
  device=device)
26
  }
27
 
28
- # Carregando modelos de tradução
29
- tokenizer_en_pt = AutoTokenizer.from_pretrained("unicamp-dl/translation-en-pt-t5")
30
- model_en_pt = AutoModelForSeq2SeqGeneration.from_pretrained("unicamp-dl/translation-en-pt-t5")
31
-
32
  except Exception as e:
33
  print(f"Erro ao carregar modelos: {str(e)}")
34
 
@@ -40,20 +39,41 @@ def safe_process(func):
40
  return f"Erro ao processar: {str(e)}"
41
  return wrapper
42
 
 
 
 
 
 
 
43
  @safe_process
44
  def translate(text, direction):
45
  if not text:
46
  return "Por favor, insira um texto para tradução."
47
-
48
- input_text = text
49
  if direction == "pt_en":
50
- input_text = f"translate Portuguese to English: {text}"
51
  else:
52
- input_text = f"translate English to Portuguese: {text}"
53
-
54
- inputs = tokenizer_en_pt(input_text, return_tensors="pt", max_length=512, truncation=True)
55
- outputs = model_en_pt.generate(**inputs)
56
- return tokenizer_en_pt.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  @safe_process
59
  def chat_response(message, history):
@@ -63,8 +83,6 @@ def chat_response(message, history):
63
  history.append((message, response[0]['generated_text']))
64
  return "", history
65
 
66
- # [Resto das funções permanecem iguais]
67
-
68
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
69
  with gr.Tab("Início"):
70
  gr.HTML(open("index.html").read())
@@ -91,6 +109,29 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
91
  outputs=translation_output
92
  )
93
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  with gr.Tab("Chat"):
95
  chatbot = gr.Chatbot()
96
  msg = gr.Textbox(label="Mensagem")
 
1
  import gradio as gr
2
+ from transformers import pipeline, AutoTokenizer, AutoModel
3
  import torch
4
  import warnings
5
  warnings.filterwarnings('ignore')
 
11
  'transcription': pipeline("automatic-speech-recognition",
12
  model="openai/whisper-small",
13
  device=device),
14
+ 'translation': pipeline("translation",
15
+ model="facebook/mbart-large-50-many-to-many-mmt",
16
+ device=device),
17
  'summarization': pipeline("summarization",
18
  model="facebook/bart-large-cnn",
19
  device=device),
 
28
  device=device)
29
  }
30
 
 
 
 
 
31
  except Exception as e:
32
  print(f"Erro ao carregar modelos: {str(e)}")
33
 
 
39
  return f"Erro ao processar: {str(e)}"
40
  return wrapper
41
 
42
@safe_process
def transcribe(audio):
    """Run speech-to-text on an uploaded audio file.

    Uses the Whisper ASR pipeline stored in ``models['transcription']``.
    Returns the recognized text, or a prompt message when no audio is given.
    """
    if audio:
        return models['transcription'](audio)["text"]
    return "Por favor, forneça um arquivo de áudio."
47
+
48
@safe_process
def translate(text, direction):
    """Translate *text* between Portuguese and English.

    Parameters:
        text: input text to translate; empty input returns a prompt message.
        direction: "pt_en" for Portuguese -> English, anything else for
            English -> Portuguese (matches the UI's two-option selector).

    Returns the translated string.
    """
    if not text:
        return "Por favor, insira um texto para tradução."

    # mBART-50 ("facebook/mbart-large-50-many-to-many-mmt") identifies
    # languages with "xx_XX"-style codes (e.g. "pt_XX", "en_XX"); bare ISO
    # codes like "pt"/"en" are not in its vocabulary and make the pipeline
    # fail, so the full codes must be passed here.
    if direction == "pt_en":
        result = models['translation'](text, src_lang="pt_XX", tgt_lang="en_XX")[0]
    else:
        result = models['translation'](text, src_lang="en_XX", tgt_lang="pt_XX")[0]

    return result['translation_text']
59
+
60
@safe_process
def summarize(text):
    """Produce an abstractive summary of *text*.

    Delegates to the BART summarization pipeline in ``models['summarization']``
    with a 30-130 token output window; empty input returns a prompt message.
    """
    if not text:
        return "Por favor, insira um texto para resumir."
    outputs = models['summarization'](text, max_length=130, min_length=30)
    return outputs[0]['summary_text']
65
+
66
@safe_process
def analyze_sentiment(text):
    """Classify the sentiment of *text*.

    Returns the label predicted by the ``models['sentiment']`` pipeline,
    or a prompt message when the input is empty.
    """
    if not text:
        return "Por favor, insira um texto para análise."
    prediction = models['sentiment'](text)
    return prediction[0]['label']
71
+
72
@safe_process
def answer_question(question, context):
    """Answer *question* using extractive QA over *context*.

    Both arguments are required; a prompt message is returned when either
    is missing. Delegates to the ``models['question_answering']`` pipeline.
    """
    if question and context:
        return models['question_answering'](question=question, context=context)['answer']
    return "Por favor, forneça tanto a pergunta quanto o contexto."
77
 
78
  @safe_process
79
  def chat_response(message, history):
 
83
  history.append((message, response[0]['generated_text']))
84
  return "", history
85
 
 
 
86
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
87
  with gr.Tab("Início"):
88
  gr.HTML(open("index.html").read())
 
109
  outputs=translation_output
110
  )
111
 
112
+ with gr.Tab("Resumo"):
113
+ text_to_summarize = gr.Textbox(label="Texto para Resumir", lines=5)
114
+ summarize_button = gr.Button("Resumir")
115
+ summary_output = gr.Textbox(label="Resumo", lines=3)
116
+ summarize_button.click(summarize, inputs=text_to_summarize, outputs=summary_output)
117
+
118
+ with gr.Tab("Análise de Sentimento"):
119
+ sentiment_text = gr.Textbox(label="Texto para Análise", lines=3)
120
+ sentiment_button = gr.Button("Analisar")
121
+ sentiment_output = gr.Textbox(label="Sentimento")
122
+ sentiment_button.click(analyze_sentiment, inputs=sentiment_text, outputs=sentiment_output)
123
+
124
+ with gr.Tab("Perguntas e Respostas"):
125
+ question_input = gr.Textbox(label="Pergunta")
126
+ context_input = gr.Textbox(label="Contexto", lines=5)
127
+ qa_button = gr.Button("Responder")
128
+ qa_output = gr.Textbox(label="Resposta", lines=2)
129
+ qa_button.click(
130
+ answer_question,
131
+ inputs=[question_input, context_input],
132
+ outputs=qa_output
133
+ )
134
+
135
  with gr.Tab("Chat"):
136
  chatbot = gr.Chatbot()
137
  msg = gr.Textbox(label="Mensagem")