dioarafl committed on
Commit
dc024f2
·
verified ·
1 Parent(s): 3b11cb7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -15
app.py CHANGED
@@ -3,17 +3,15 @@ from gtts import gTTS
3
  from io import BytesIO
4
  import IPython.display as ipd
5
 
6
- # Load CodeLlama model
7
  codellama_model = gr.Interface.load("models/meta-llama/CodeLlama-7b-Python-hf")
8
-
9
- # Load deepseek-coder model
10
  deepseek_model = gr.Interface.load("models/deepseek-ai/deepseek-coder-1.3b-instruct")
11
 
12
- # Define function to process text input for CodeLlama model
13
  def process_text_codellama(input_text):
14
  return codellama_model.predict(input_text)
15
 
16
- # Define function to process speech input for CodeLlama model
17
  def process_speech_codellama(audio):
18
  response = codellama_model.predict(audio)
19
  tts = gTTS(text=response, lang='en')
@@ -22,11 +20,11 @@ def process_speech_codellama(audio):
22
  fp.seek(0)
23
  return ipd.Audio(fp.read(), autoplay=True)
24
 
25
- # Define function to process text input for deepseek model
26
  def process_text_deepseek(input_text):
27
  return deepseek_model.predict(input_text)
28
 
29
- # Define function to process speech input for deepseek model
30
  def process_speech_deepseek(audio):
31
  response = deepseek_model.predict(audio)
32
  tts = gTTS(text=response, lang='en')
@@ -35,20 +33,13 @@ def process_speech_deepseek(audio):
35
  fp.seek(0)
36
  return ipd.Audio(fp.read(), autoplay=True)
37
 
38
- # Add voice input and text input with responsive button
39
- inputs = [
40
- gr.inputs.Textbox(label="Type your code", placeholder="Type your code here...", lines=10),
41
- gr.inputs.Checkbox(label="Enable voice input", default=False),
42
- "text" # Dropdown for model selection
43
- ]
44
 
45
- # Launch interface with live=True
46
  gr.Interface(
47
  fn=[
48
  [process_text_codellama, process_speech_codellama],
49
  [process_text_deepseek, process_speech_deepseek]
50
  ],
51
- inputs=inputs,
52
  outputs=["text", "audio"],
53
  live=True
54
  ).launch()
 
3
  from io import BytesIO
4
  import IPython.display as ipd
5
 
6
# Load the two hosted code-generation models through Gradio's
# Hugging Face model-hub integration.
# NOTE(review): `gr.Interface.load` is deprecated in newer Gradio
# releases in favor of `gr.load` — confirm against the pinned
# Gradio version before upgrading.
codellama_model = gr.Interface.load("models/meta-llama/CodeLlama-7b-Python-hf")
deepseek_model = gr.Interface.load("models/deepseek-ai/deepseek-coder-1.3b-instruct")
9
 
10
+
11
def process_text_codellama(input_text):
    """Send a text prompt to the CodeLlama model and return its response.

    Thin wrapper around the module-level ``codellama_model`` interface;
    the prompt is forwarded unchanged.
    """
    prediction = codellama_model.predict(input_text)
    return prediction
13
 
14
+
15
  def process_speech_codellama(audio):
16
  response = codellama_model.predict(audio)
17
  tts = gTTS(text=response, lang='en')
 
20
  fp.seek(0)
21
  return ipd.Audio(fp.read(), autoplay=True)
22
 
23
+
24
def process_text_deepseek(input_text):
    """Send a text prompt to the deepseek-coder model and return its response.

    Thin wrapper around the module-level ``deepseek_model`` interface;
    the prompt is forwarded unchanged.
    """
    prediction = deepseek_model.predict(input_text)
    return prediction
26
 
27
+
28
  def process_speech_deepseek(audio):
29
  response = deepseek_model.predict(audio)
30
  tts = gTTS(text=response, lang='en')
 
33
  fp.seek(0)
34
  return ipd.Audio(fp.read(), autoplay=True)
35
 
 
 
 
 
 
 
36
 
 
37
# Build and launch the Gradio app with live updates (re-runs on input change).
# NOTE(review): `fn` is passed a nested LIST of callables here; Gradio's
# `Interface` signature documents `fn` as a single callable — confirm the
# pinned Gradio version actually accepts this form, otherwise this call
# will raise at startup. Likewise, `outputs=["text", "audio"]` implies the
# function returns two values, which the text-processing wrappers above
# do not — verify against the Gradio version in use.
gr.Interface(
    fn=[
        [process_text_codellama, process_speech_codellama],
        [process_text_deepseek, process_speech_deepseek]
    ],
    inputs="text",
    outputs=["text", "audio"],
    live=True
).launch()