stackoverflow-qa / README.md
CO-IR's picture
Update README.md
67c8d0f verified
metadata
# Hugging Face dataset-card metadata: three configs (corpus, queries, and the
# default qrels/relevance-judgment config linking query-ids to corpus-ids).
dataset_info:
  # Document collection: one record per corpus document.
  - config_name: corpus
    features:
      - name: _id
        dtype: string
      - name: partition
        dtype: string
      - name: text
        dtype: string
      - name: language
        dtype: string
      - name: title
        dtype: string
    splits:
      - name: corpus
        num_bytes: 24718668
        num_examples: 19931
    download_size: 13352028
    dataset_size: 24718668
  # Relevance judgments (qrels): maps query-id -> corpus-id with an int score.
  - config_name: default
    features:
      - name: query-id
        dtype: string
      - name: corpus-id
        dtype: string
      - name: score
        dtype: int64
    splits:
      - name: train
        num_bytes: 368416
        num_examples: 13951
      - name: test
        num_bytes: 55832
        num_examples: 1994
    download_size: 182796
    dataset_size: 424248
  # Query collection: same schema as the corpus config.
  - config_name: queries
    features:
      - name: _id
        dtype: string
      - name: partition
        dtype: string
      - name: text
        dtype: string
      - name: language
        dtype: string
      - name: title
        dtype: string
    splits:
      - name: queries
        num_bytes: 28244088
        num_examples: 19931
    download_size: 14308141
    dataset_size: 28244088
# File-glob mapping telling the datasets loader where each config's
# parquet shards live inside the repository.
configs:
  - config_name: corpus
    data_files:
      - split: corpus
        path: corpus/corpus-*
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: test
        path: data/test-*
  - config_name: queries
    data_files:
      - split: queries
        path: queries/queries-*

To evaluate a model on the MTEB version of this dataset, use the following code:

# Evaluate a SentenceTransformer embedding model on the MTEB code-retrieval
# task suite (which includes StackOverflowQA, this dataset) and print the
# results.
import mteb
import logging
from sentence_transformers import SentenceTransformer
from mteb import MTEB

logger = logging.getLogger(__name__)

# Embedding model under evaluation.
model_name = 'intfloat/e5-base-v2'
model = SentenceTransformer(model_name)

# COIR / code-retrieval benchmark tasks.
tasks = mteb.get_tasks(
    tasks=[
        "AppsRetrieval",
        "CodeFeedbackMT",
        "CodeFeedbackST",
        "CodeTransOceanContest",
        "CodeTransOceanDL",
        "CosQA",
        "SyntheticText2SQL",
        "StackOverflowQA",
        "COIRCodeSearchNetRetrieval",
        "CodeSearchNetCCRetrieval",
    ]
)
evaluation = MTEB(tasks=tasks)
results = evaluation.run(
    model=model,
    overwrite_results=True
)
# Fix: the original printed the undefined name `result` (NameError);
# the evaluation output is bound to `results`.
print(results)