None1145 committed
Commit 05121a3 · verified · 1 Parent(s): fd29de7

Update app.py

Files changed (1)
  1. app.py +6 -5
app.py CHANGED
@@ -1,7 +1,7 @@
 import pandas as pd
 import gradio as gr
 
-def compare_csv_files():
+def compare_csv_files(max_num):
     df1 = pd.read_csv("fish-speech-1.5.csv")
     df2 = pd.read_csv("fish-speech-1.4.csv")
 
@@ -11,21 +11,21 @@ def compare_csv_files():
     merged_df["CharacterErrorRate_Diff"] = merged_df["CharacterErrorRate_1.5"] - merged_df["CharacterErrorRate_1.4"]
 
     merged_df["WordErrorRate_Comparison"] = merged_df["WordErrorRate_Diff"].apply(
-        lambda x: "1.4 is the same as 1.5 (Ignored due to large diff)" if abs(x) > 10 else (
+        lambda x: "1.4 is the same as 1.5 (Ignored due to large diff)" if abs(x) > max_num else (
             f"1.5 is stronger than 1.4 ({x:.8f})" if x < 0 else (
                 f"1.4 is stronger than 1.5 ({-x:.8f})" if x > 0 else "1.4 is the same as 1.5 (0)"
             )
         )
     )
     merged_df["CharacterErrorRate_Comparison"] = merged_df["CharacterErrorRate_Diff"].apply(
-        lambda x: "1.4 is the same as 1.5 (Ignored due to large diff)" if abs(x) > 10 else (
+        lambda x: "1.4 is the same as 1.5 (Ignored due to large diff)" if abs(x) > max_num else (
             f"1.5 is stronger than 1.4 ({x:.8f})" if x < 0 else (
                 f"1.4 is stronger than 1.5 ({-x:.8f})" if x > 0 else "1.4 is the same as 1.5 (0)"
             )
         )
     )
 
-    avg_word_diff = merged_df["WordErrorRate_Diff"].loc[merged_df["WordErrorRate_Diff"].abs() <= 10].mean()
+    avg_word_diff = merged_df["WordErrorRate_Diff"].loc[merged_df["WordErrorRate_Diff"].abs() <= max_num].mean()
     avg_char_diff = merged_df["CharacterErrorRate_Diff"].loc[merged_df["CharacterErrorRate_Diff"].abs() <= 1].mean()
     overall_summary = f"""
     <h3>Overall Comparison:</h3>
@@ -42,9 +42,10 @@ def compare_csv_files():
 
     return overall_summary + result.to_html(index=False)
 
+max_num = gr.number(Number=10)
 gr.Interface(
     fn=compare_csv_files,
-    inputs=None,
+    inputs=[max_num],
     outputs="html",
     title="Fish Speech Benchmark",
     description="This is a non official model performance test from Fish Speech / Whisper Base / More data will be added later (not too much)"