File size: 2,518 Bytes
6002427
 
c455914
6002427
 
 
fa1db35
 
cf8c99d
6002427
 
cf8c99d
 
fa1db35
 
6002427
cf8c99d
 
 
 
 
 
6002427
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fa1db35
 
 
6002427
 
fa1db35
 
 
 
6002427
 
cf8c99d
 
 
 
 
6002427
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6a8f38b
6002427
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
"""Seed the stacklok/results dataset on the Hugging Face Hub.

Creates a one-row placeholder dataset establishing the leaderboard schema,
pushes it to the Hub, and uploads a dataset card (README.md) describing
each field. Requires write access to the target repo; set the
HUGGINGFACE_TOKEN environment variable for non-interactive auth.
"""

from datasets import Dataset
from huggingface_hub import HfApi, login
import os

# Single Hub repository that holds the leaderboard results.
REPO_ID = "stacklok/results"

# One sample entry so the repo is created with the expected schema.
# Keys are column names; each value is a one-element column.
initial_data = {
    "model": ["example/model"],
    "model_raw": ["example/model"],
    "base_model": ["gpt2"],
    "revision": ["main"],
    "precision": ["fp16"],
    "weight_type": ["Safetensors"],
    "model_type": ["Pretrained"],
    "status": ["PENDING"],
    "timestamp": ["2025-01-26T15:15:09.693973"],
    "security_score": [0.5],
    "safetensors_compliant": [True],
    "hub_license": ["MIT"],
    "hub_likes": [0],
    "params_billion": [0.5],
    "available_on_hub": [True],
    "model_sha": ["abc123"]
}

# Build the Dataset from the column-oriented dict above.
dataset = Dataset.from_dict(initial_data)

# Authenticate with the Hub. If HUGGINGFACE_TOKEN is set, it is used
# non-interactively; otherwise login() falls back to an interactive prompt
# (token=None), matching the previous behavior.
login(token=os.environ.get("HUGGINGFACE_TOKEN"))

# Push the seed dataset to the Hub, creating the repo if needed.
dataset.push_to_hub(REPO_ID)

# Dataset card content (YAML front matter + field documentation).
dataset_card = """
---
language:
- en
license:
- mit
---

# Dataset Card for stacklok/results

This dataset contains evaluation results for various models, focusing on security scores and other relevant metrics.

## Dataset Structure

The dataset contains the following fields:
- `model`: The identifier of the model
- `model_raw`: The raw model identifier
- `base_model`: The base model if applicable
- `revision`: The revision or version of the model
- `precision`: The precision used for the model (e.g., fp16, fp32)
- `weight_type`: Type of weights used
- `model_type`: Type of the model
- `status`: Current status of the evaluation
- `timestamp`: When the evaluation was performed
- `security_score`: A score representing the model's security evaluation
- `safetensors_compliant`: A boolean indicating whether the model is compliant with safetensors
- `hub_license`: The license of the model on Hugging Face Hub
- `hub_likes`: Number of likes on Hugging Face Hub
- `params_billion`: Number of parameters in billions
- `available_on_hub`: Whether the model is available on Hugging Face Hub
- `model_sha`: SHA hash of the model

## Usage

This dataset is used to populate the secure code leaderboard, providing insights into the security aspects of various models.
"""

# Write the card locally; explicit UTF-8 so the result is identical on
# platforms where the default locale encoding differs (e.g. Windows).
with open("README.md", "w", encoding="utf-8") as f:
    f.write(dataset_card)

# Upload the card to the dataset repo on the Hub.
api = HfApi()
api.upload_file(
    path_or_fileobj="README.md",
    path_in_repo="README.md",
    repo_id=REPO_ID,
    repo_type="dataset"
)

print("Dataset initialized and card uploaded successfully!")