prithivMLmods committed on
Commit
96784fc
·
verified ·
1 Parent(s): 2062b4d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -4,6 +4,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
4
  import gradio as gr
5
  from snac import SNAC
6
 
 
7
  def redistribute_codes(row):
8
  """
9
  Convert a sequence of token codes into an audio waveform using SNAC.
@@ -42,9 +43,9 @@ def redistribute_codes(row):
42
  snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to("cuda")
43
 
44
  # Load the single-speaker language model
45
- tokenizer = AutoTokenizer.from_pretrained('prithivMLmods/Llama-3B-Mono-Jim')
46
  model = AutoModelForCausalLM.from_pretrained(
47
- 'prithivMLmods/Llama-3B-Mono-Jim', torch_dtype=torch.bfloat16
48
  ).cuda()
49
 
50
  @spaces.GPU
@@ -52,7 +53,7 @@ def generate_audio(text, temperature, top_p, max_new_tokens):
52
  """
53
  Given input text, generate speech audio.
54
  """
55
- speaker = "Jim"
56
  prompt = f'<custom_token_3><|begin_of_text|>{speaker}: {text}<|eot_id|><custom_token_4><custom_token_5><custom_token_1>'
57
  input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').to('cuda')
58
 
@@ -75,8 +76,8 @@ def generate_audio(text, temperature, top_p, max_new_tokens):
75
 
76
  # Gradio Interface
77
  with gr.Blocks() as demo:
78
- gr.Markdown("# Llama-3B-Mono-Jim - Single Speaker Audio Generation")
79
- gr.Markdown("Generate speech audio using the `prithivMLmods/Llama-3B-Mono-Jim` model.")
80
 
81
  with gr.Row():
82
  text_input = gr.Textbox(lines=4, label="Input Text")
@@ -96,4 +97,4 @@ with gr.Blocks() as demo:
96
  )
97
 
98
  if __name__ == "__main__":
99
- demo.launch()
 
4
  import gradio as gr
5
  from snac import SNAC
6
 
7
+
8
  def redistribute_codes(row):
9
  """
10
  Convert a sequence of token codes into an audio waveform using SNAC.
 
43
  snac_model = SNAC.from_pretrained("hubertsiuzdak/snac_24khz").to("cuda")
44
 
45
  # Load the single-speaker language model
46
+ tokenizer = AutoTokenizer.from_pretrained('prithivMLmods/Llama-3B-Mono-Cooper')
47
  model = AutoModelForCausalLM.from_pretrained(
48
+ 'prithivMLmods/Llama-3B-Mono-Cooper', torch_dtype=torch.bfloat16
49
  ).cuda()
50
 
51
  @spaces.GPU
 
53
  """
54
  Given input text, generate speech audio.
55
  """
56
+ speaker = "Cooper"
57
  prompt = f'<custom_token_3><|begin_of_text|>{speaker}: {text}<|eot_id|><custom_token_4><custom_token_5><custom_token_1>'
58
  input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').to('cuda')
59
 
 
76
 
77
  # Gradio Interface
78
  with gr.Blocks() as demo:
79
+ gr.Markdown("# Llama-3B-Mono-Cooper - Single Speaker Audio Generation")
80
+ gr.Markdown("Generate speech audio using the `prithivMLmods/Llama-3B-Mono-Cooper` model.")
81
 
82
  with gr.Row():
83
  text_input = gr.Textbox(lines=4, label="Input Text")
 
97
  )
98
 
99
  if __name__ == "__main__":
100
+ demo.launch()