Update app.py
app.py
CHANGED
@@ -3,68 +3,66 @@ import subprocess
 import openai
 import gradio as gr
 
-openai.api_key = "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"
-
-import requests
-
-headers = {"Authorization": "Bearer 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"}
-
-def query(filename):
-    with open(filename, "rb") as f:
-        data = f.read()
-    response = requests.post(API_URL, headers=headers, data=data)
-    return response.json()
+import os
+
+openai.api_key = os.getenv("OPENAI_API_KEY")
 
 def transcribe(audio):
-    …
+    with open(audio, "rb") as audio_file:
+        transcript = openai.Audio.transcribe("whisper-1", audio_file)
+    return transcript["text"]
 
 def generate_response(transcribed_text):
-    …
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "system", "content": "Wewe ni mtaalamu wa viazi lishe na utajibu maswali yote kwa kiswahili tu!"},
+            {"role": "user", "content": "Mambo vipi?"},
+            {"role": "assistant", "content": """Salama je una swali lolote kuhusu viazi lishe?"""},
+            {"role": "user", "content": "nini maana ya Viazi lishe?"},
+            {"role": "assistant", "content": """ viazi lishe ni Viazi vitamu vyenye rangi ya karoti kwa ndani ambavyo vina vitamin A kwa wingi"""},
+            {"role": "user", "content": "nini matumizi ya viazi lishe?"},
+            {"role": "assistant", "content": """ viazi lishe vinaweza kutengenezea chakula kama Keki,
+            Maandazi, Kalimati na tambi: Ukisaga unga wa viazi lishe,
+            unaweza kutumika kupika vyakula ambavyo huwa watu
+            hutumia unga wa ngano kupika, unga wa viazi lishe una
+            virutubisho vingi zaidi kuliko unga wa ngano na
+            ukitumika kupikia vyakula tajwa hapo juu watumiaji
+            watakuwa wanakula vyakula vyenye virutubisho Zaidi."""},
+            {"role": "user", "content": transcribed_text},
+        ]
+    )
+    return response['choices'][0]['message']['content']
 
 def inference(text):
-    …
+    output_file = "tts_output.wav"
+    cmd = ['tts', '--text', text, '--out_path', output_file]
+    subprocess.run(cmd, check=True)
+    return output_file
 
 def process_audio_and_respond(audio):
-    …
+    text = transcribe(audio)
+    response_text = generate_response(text)
+    output_file = inference(response_text)
+    return response_text, output_file
 
 demo = gr.Interface(
-    …
+    process_audio_and_respond,
+    gr.inputs.Audio(source="microphone", type="filepath", label="Bonyeza kitufe cha kurekodi na uliza swali lako"),
+    [gr.outputs.Textbox(label="Jibu (kwa njia ya maandishi)"), gr.outputs.Audio(type="filepath", label="Jibu kwa njia ya sauti (Bofya kusikiliza Jibu)")],
+    title="Mtaalamu wa Viazi Lishe",
+    description="Uliza Mtaalamu wetu swali lolote Kuhusu viazi Lishe",
+    theme="compact",
+    layout="vertical",
+    allow_flagging=False,
+    live=True,
 )
 
-…
+demo.launch(share = True)
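
The key change in this commit: the hardcoded OpenAI key, along with the now-unused Hugging Face Inference API token and the query() helper, is removed, and the key is read from the environment instead. The variable therefore has to be present wherever the app runs (on Hugging Face Spaces, typically as a repository secret). Since os.getenv returns None when the variable is unset, a missing key only surfaces later as an authentication error inside transcribe(); a fail-fast startup guard, as a sketch (the message wording is mine, not part of the commit):

    import os
    import sys

    # exit at startup instead of failing on the first request
    if not os.getenv("OPENAI_API_KEY"):
        sys.exit("OPENAI_API_KEY is not set; export it or add it as a Space secret.")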
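
For readers without Swahili: the system message tells the model it is an expert on viazi lishe (orange-fleshed sweet potato) and must answer all questions in Swahili only. The seeded few-shot exchange then covers a greeting ("How are things?" / "All good; do you have any question about viazi lishe?"), a definition ("Viazi lishe are sweet potatoes with carrot-colored flesh, rich in vitamin A"), and uses (flour milled from them can stand in for wheat flour in cakes, maandazi, kalimati, and noodles, with more nutrients than wheat flour), before the user's transcribed question is appended as the final message. The interface labels likewise translate to "Press the record button and ask your question", "Answer (as text)", "Answer as audio (click to listen)", and the title and description to "The Viazi Lishe Expert" and "Ask our expert any question about viazi lishe".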
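
One caveat in the new inference(): every synthesis is written to the same tts_output.wav, so overlapping requests on a shared Space would overwrite each other's audio. A minimal sketch of one way around that, keeping the same Coqui tts CLI call; the tempfile-based naming is an addition of mine, not part of the commit:

    import subprocess
    import tempfile

    def inference(text):
        # give each request its own output file instead of a shared tts_output.wav
        out_path = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
        subprocess.run(['tts', '--text', text, '--out_path', out_path], check=True)
        return out_path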
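
Note also that the new code targets older library surfaces: openai.Audio.transcribe and openai.ChatCompletion.create exist only in the pre-1.0 openai client, and the gr.inputs/gr.outputs namespaces (plus Interface arguments like layout) were dropped in Gradio 4. A requirements.txt sketch that should match the code as written; the exact pins are assumptions, chosen as late releases that still shipped these APIs:

    openai==0.28.1    # pre-1.0 client with openai.Audio / openai.ChatCompletion
    gradio==3.50.2    # 3.x still provides gr.inputs and gr.outputs
    TTS               # Coqui TTS, supplies the `tts` command used by inference()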