xlgeng committed on
Commit
a40a139
·
1 Parent(s): 72960e5

test infer

Browse files
Files changed (1) hide show
  1. app.py +7 -6
app.py CHANGED
@@ -53,11 +53,12 @@ TASK_PROMPT_MAPPING = {
53
  "STTC (Speech to Text Chat)": "首先将语音转录为文字,然后对语音内容进行回复,转录和文字之间使用<开始回答>分割。"
54
  }
55
 
 
56
  def init_model_my():
57
  logging.basicConfig(level=logging.DEBUG,
58
  format='%(asctime)s %(levelname)s %(message)s')
59
- config_path = "/home/node54_tmpdata/xlgeng/code/wenet_undersdand_and_speech_xlgeng/examples/wenetspeech/whisper/exp/update_data/epoch_1_with_token/epoch_11.yaml"
60
- checkpoint_path = "/home/work_nfs15/asr_data/ckpt/understanding_model/epoch_13_with_asr-chat_full_data/step_32499/step_32499.pt"
61
  args = SimpleNamespace(**{
62
  "checkpoint": checkpoint_path,
63
  })
@@ -68,7 +69,7 @@ def init_model_my():
68
  print(model)
69
  return model, tokenizer
70
 
71
- # model, tokenizer = init_model_my()
72
  print("model init success")
73
  def do_resample(input_wav_path, output_wav_path):
74
  """"""
@@ -120,13 +121,13 @@ def true_decode_fuc(input_wav_path, input_prompt):
120
  model = None
121
  res_text = model.generate(wavs=feat, wavs_len=feat_lens, prompt=input_prompt)[0]
122
  print("耿雪龙哈哈:", res_text)
123
- return res_text, now_file_tmp_path_resample
124
  @spaces.GPU
125
  def do_decode(input_wav_path, input_prompt):
126
  print(f'input_wav_path= {input_wav_path}, input_prompt= {input_prompt}')
127
  # 省略处理逻辑
128
- # output_res, now_file_tmp_path_resample= true_decode_fuc(input_wav_path, input_prompt)
129
- output_res = f"耿雪龙哈哈:测试结果, input_wav_path= {input_wav_path}, input_prompt= {input_prompt}"
130
  return output_res
131
 
132
  def save_to_jsonl(if_correct, wav, prompt, res):
 
53
  "STTC (Speech to Text Chat)": "首先将语音转录为文字,然后对语音内容进行回复,转录和文字之间使用<开始回答>分割。"
54
  }
55
 
56
+ @spaces.GPU
57
  def init_model_my():
58
  logging.basicConfig(level=logging.DEBUG,
59
  format='%(asctime)s %(levelname)s %(message)s')
60
+ config_path = "train.yaml"
61
+ checkpoint_path = "step_32499.pt"
62
  args = SimpleNamespace(**{
63
  "checkpoint": checkpoint_path,
64
  })
 
69
  print(model)
70
  return model, tokenizer
71
 
72
+ model, tokenizer = init_model_my()
73
  print("model init success")
74
  def do_resample(input_wav_path, output_wav_path):
75
  """"""
 
121
  model = None
122
  res_text = model.generate(wavs=feat, wavs_len=feat_lens, prompt=input_prompt)[0]
123
  print("耿雪龙哈哈:", res_text)
124
+ return res_text
125
  @spaces.GPU
126
  def do_decode(input_wav_path, input_prompt):
127
  print(f'input_wav_path= {input_wav_path}, input_prompt= {input_prompt}')
128
  # 省略处理逻辑
129
+ output_res= true_decode_fuc(input_wav_path, input_prompt)
130
+ # output_res = f"耿雪龙哈哈:测试结果, input_wav_path= {input_wav_path}, input_prompt= {input_prompt}"
131
  return output_res
132
 
133
  def save_to_jsonl(if_correct, wav, prompt, res):