pufanyi committed
Commit acd8e8a (parent: ec6c7c1)

chore: Update app.py and css_html_js.py to improve UI and functionality

Files changed (4)
  1. app.py +5 -6
  2. src/about.py +21 -0
  3. src/display/css_html_js.py +4 -0
  4. src/links.yaml +9 -0
app.py CHANGED

@@ -17,12 +17,11 @@ from src.about import (
     LLM_BENCHMARKS_TEXT,
     TITLE,
 )
-from src.display.css_html_js import custom_css
+from src.display.css_html_js import (
+    custom_css,
+    CSS_EXTERNAL,
+)
 from src.display.utils import (
-    BENCHMARK_COLS,
-    COLS,
-    EVAL_COLS,
-    EVAL_TYPES,
     AutoEvalColumn,
     fields,
 )
@@ -149,7 +148,7 @@ def init_detailed_results():
     """)
 
 
-demo = gr.Blocks(css=custom_css)
+demo = gr.Blocks(css=custom_css, css_external=CSS_EXTERNAL)
 with demo:
     gr.HTML(TITLE)
     gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
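For context on the change above: `custom_css` keeps carrying the inline rules from `src/display/css_html_js.py`, while the new `CSS_EXTERNAL` list holds stylesheet URLs (the Font Awesome CDN entry added later in this commit). A minimal sketch of the resulting wiring, assuming the installed Gradio version supports the `css_external` keyword that app.py now passes to `gr.Blocks`:

```python
# Minimal sketch of the wiring introduced by this commit (not part of the diff).
# Assumes a Gradio build whose gr.Blocks accepts the css_external keyword.
import gradio as gr

custom_css = "/* inline rules defined in src/display/css_html_js.py */"
CSS_EXTERNAL = [
    "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/all.min.css"
]

# css injects inline style rules; css_external pulls in external stylesheets,
# here the Font Awesome icons used by the header links.
demo = gr.Blocks(css=custom_css, css_external=CSS_EXTERNAL)

if __name__ == "__main__":
    demo.launch()
```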
src/about.py CHANGED

@@ -1,6 +1,7 @@
 from dataclasses import dataclass
 from enum import Enum
 
+import yaml
 import os
 
 
@@ -31,6 +32,26 @@ TITLE = """<h1 align="center" id="space-title">Multimodal LiveBench: From Static
 with open(os.path.join(os.path.dirname(__file__), "about.md"), "r") as f:
     INTRODUCTION_TEXT = f.read()
 
+def get_link(item):  # item: {name, icon, url}
+    name = item["name"]
+    icon = item.get("icon", None)
+    url = item.get("url", "#")
+    if icon.startswith("http") and icon.endswith(".svg"):
+        icon_tag = f'<img src="{icon}" alt="{name}" style="height: 24px; width: 24px;">'
+    elif icon.startswith("fa-"):
+        icon_tag = f'<i class="{icon}"></i>'
+    elif not icon or icon == "":
+        icon_tag = ""
+    else:
+        raise ValueError("Invalid icon format")
+    return f'{icon_tag} <a href="{url}" target="_blank">{name}</a>'
+
+with open(os.path.join(os.path.dirname(__file__), "links.yaml"), "r", encoding="utf-8") as f:
+    links = yaml.safe_load(f)
+    LINKS = " | ".join([
+        get_link(item) for item in links
+    ])
+
 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
 ## How it works
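A short usage sketch (not part of the commit) showing what `get_link` renders; the input dict mirrors the first entry of the `src/links.yaml` file added below, and the expected output follows from the `fa-` branch of the function:

```python
# Illustration only: what get_link() from src/about.py produces for one entry.
from src.about import get_link

item = {
    "name": "GitHub",
    "icon": "fa-brands fa-github",
    "url": "https://github.com/EvolvingLMMs-Lab/lmms-eval",
}

# The "fa-" branch wraps the icon in a Font Awesome <i> tag:
print(get_link(item))
# <i class="fa-brands fa-github"></i> <a href="https://github.com/EvolvingLMMs-Lab/lmms-eval" target="_blank">GitHub</a>
```

`LINKS` is then all such snippets joined with " | ". Note that `icon.startswith(...)` runs before the `not icon` check, so an entry that omits `icon` entirely would raise `AttributeError`; only an explicit empty string reaches the empty-icon branch.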
src/display/css_html_js.py CHANGED

@@ -106,3 +106,7 @@ get_window_url_params = """
     return url_params;
     }
 """
+
+CSS_EXTERNAL = [
+    "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/all.min.css"
+]
src/links.yaml ADDED

@@ -0,0 +1,9 @@
+- name: GitHub
+  icon: fa-brands fa-github
+  url: https://github.com/EvolvingLMMs-Lab/lmms-eval
+- name: HuggingFace Datasets
+  icon: https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg
+  url: https://huggingface.co/datasets/lmms-lab/LiveBench
+- name: Leaderboard
+  icon: fa-solid fa-trophy
+  url: https://huggingface.co/spaces/lmms-lab/LiveBench
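For reference, `yaml.safe_load` turns this file into a plain list of dicts, which is exactly the shape `get_link` expects (a sketch, not part of the commit):

```python
# Sketch: the structure yaml.safe_load() yields for src/links.yaml (first entry shown).
import yaml

with open("src/links.yaml", "r", encoding="utf-8") as f:
    links = yaml.safe_load(f)

print(links[0])
# {'name': 'GitHub', 'icon': 'fa-brands fa-github',
#  'url': 'https://github.com/EvolvingLMMs-Lab/lmms-eval'}
```

New links can be added by appending entries here; per `get_link` above, `icon` may be a Font Awesome class (`fa-...`), an http(s) URL ending in `.svg`, or an empty string.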