import gradio as gr
import pandas as pd
block = gr.Blocks(title="Q-Bench Leaderboard")

LEADERBOARD_INTRODUCTION = """# Q-Bench Leaderboard


  <img style="width:40%" src="https://raw.githubusercontent.com/Q-Future/Q-Bench/master/logo.png">
    
    
    *"How do multi-modaility LLMs perform on low-level computer vision?"*  
    πŸ† Welcome to the leaderboard of the **Q-Bench**! *A Comprehensive Benchmark Suite for General-purpose Foundation Models on Low-level Vision*
    <div style="display: flex; flex-wrap: wrap; align-items: center; gap: 10px;">
    <a href="https://github.com/Q-Future/"><img src="https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fvqassessment%2FQ-Bench&count_bg=%23E97EBA&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=visitors&edge_flat=false"/></a>
    <a href="https://github.com/Q-Future/Q-Bench"><img src="https://img.shields.io/github/stars/Q-Future/Q-Bench"/></a>
    <a href="https://arxiv.org/abs/2309.14181"><img src="https://img.shields.io/badge/Arxiv-2309:14181-red"/></a>
    <a href="https://github.com/Q-Future/Q-Bench/releases/tag/v1.0.1.1014datarelease"><img src="https://img.shields.io/badge/Data-Release-green"></a>
    <a href="https://github.com/Q-Future/Q-Instruct"><img src="https://img.shields.io/badge/Awesome-QInstruct-orange"/></a>
   </div>
    
    - **Low-level Visual Perception (A1):** Open-range multiple-choice questions on low-level visual perception. Dataset: [LLVisionQA](https://huggingface.co/datasets/teowu/LLVisionQA-QBench)
    - **Low-level Visual Description (A2):** Detailed descriptions of low-level visual attributes. Dataset: [LLDescribe](https://huggingface.co/datasets/teowu/LLDescribe-QBench)
    - **Visual Quality Assessment (A3):** MLLMs can give a *precise visual quality score* via token *logprobs*.
    
    For now, the leaderboard only includes results validated in our paper; user submissions will be supported soon.
    """


with block:
    # Render the introduction (title, logo, badges, and task descriptions).
    gr.Markdown(LEADERBOARD_INTRODUCTION)

    # One tab per sub-task; each leaderboard table is a pre-computed CSV.
    with gr.Tab("Perception (A1, dev)"):
        gr.DataFrame(pd.read_csv("qbench_a1_single_dev.csv"))
    with gr.Tab("Perception (A1, test)"):
        gr.DataFrame(pd.read_csv("qbench_a1_single_test.csv"))
    with gr.Tab("Description (A2)"):
        gr.DataFrame(pd.read_csv("qbench_a2_single.csv"))
    with gr.Tab("Assessment (A3)"):
        gr.DataFrame(pd.read_csv("qbench_a3_single.csv"))
    
    
block.launch(share=True)