import streamlit as st
import pandas as pd
import json
from utils import add_log

# Handle missing dependencies: fall back to minimal stand-ins that cover the
# small slice of the `datasets` API this module uses (len(), column_names,
# and train_test_split over lists of row dicts).
try:
    from datasets import Dataset, DatasetDict
except ImportError:
    class Dataset:
        def __init__(self, rows, column_names):
            self.rows = rows
            self.column_names = column_names

        @classmethod
        def from_list(cls, items):
            return cls(items, list(items[0].keys()) if items else [])

        @classmethod
        def from_dict(cls, dict_obj):
            # Convert column-oriented data ({col: [values, ...]}) into row dicts
            rows = [dict(zip(dict_obj, values)) for values in zip(*dict_obj.values())]
            return cls(rows, list(dict_obj.keys()))

        @classmethod
        def from_pandas(cls, df):
            return cls(df.to_dict(orient="records"), df.columns.tolist())

        def __len__(self):
            return len(self.rows)

        def train_test_split(self, test_size=0.2):
            # Deterministic tail split; enough for the train/validation bookkeeping below
            split_at = max(1, int(len(self.rows) * (1 - test_size)))
            return {
                "train": Dataset(self.rows[:split_at], self.column_names),
                "test": Dataset(self.rows[split_at:], self.column_names),
            }

    class DatasetDict(dict):
        pass
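
# Illustrative sketch (assumes the `datasets` package is absent, so the
# stand-ins above are in play); a list of row dicts supports just enough of
# the API used in this module:
#
#   ds = Dataset.from_list([{"code": "def a(): pass"}, {"code": "def b(): pass"}])
#   len(ds)           # -> 2
#   ds.column_names   # -> ["code"]
#   splits = ds.train_test_split(test_size=0.5)
#   len(splits["train"]), len(splits["test"])  # -> (1, 1)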

def process_python_dataset(uploaded_file, dataset_name):
    """
    Process an uploaded Python dataset file.
    Supports .py, .json, and .csv formats.
    
    Args:
        uploaded_file: The uploaded file object
        dataset_name: Name to identify the dataset
        
    Returns:
        bool: Success status
    """
    try:
        file_extension = uploaded_file.name.split('.')[-1].lower()
        
        if file_extension == 'py':
            # Process Python file
            content = uploaded_file.read().decode('utf-8')
            # Split by function or class definitions for separate examples
            examples = split_python_file(content)
            dataset = create_dataset_from_examples(examples)
            
        elif file_extension == 'json':
            # Process JSON file
            content = json.loads(uploaded_file.read().decode('utf-8'))
            if isinstance(content, list):
                dataset = Dataset.from_list(content)
            else:
                dataset = Dataset.from_dict(content)
            
        elif file_extension == 'csv':
            # Process CSV file
            df = pd.read_csv(uploaded_file)
            dataset = Dataset.from_pandas(df)
            
        else:
            add_log(f"Unsupported file format: {file_extension}", "ERROR")
            return False
            
        # Split into train/validation sets
        train_test_split = dataset.train_test_split(test_size=0.2)
        
        # Create a DatasetDict
        dataset_dict = DatasetDict({
            'train': train_test_split['train'],
            'validation': train_test_split['test']
        })
        
        # Store in session state (initialise the registry on first use)
        if 'datasets' not in st.session_state:
            st.session_state.datasets = {}
        st.session_state.datasets[dataset_name] = {
            'data': dataset_dict,
            'info': {
                'name': dataset_name,
                'size': len(dataset),
                'train_size': len(train_test_split['train']),
                'validation_size': len(train_test_split['test']),
                'columns': dataset.column_names,
                'created_at': pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S")
            }
        }
        
        add_log(f"Dataset '{dataset_name}' processed successfully with {len(dataset)} examples")
        return True
        
    except Exception as e:
        add_log(f"Error processing dataset: {str(e)}", "ERROR")
        return False
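
# Usage sketch (hedged): in a Streamlit page this function is typically fed by
# a file uploader. The widget label, dataset name, and success message below
# are illustrative assumptions, not part of this module's contract.
#
#   uploaded = st.file_uploader("Upload dataset", type=["py", "json", "csv"])
#   if uploaded is not None and process_python_dataset(uploaded, "my_dataset"):
#       st.success("Dataset 'my_dataset' is ready for training")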

def split_python_file(content):
    """
    Split a Python file content into separate code examples.
    
    Args:
        content: String content of Python file
        
    Returns:
        list: List of dicts, each with a single 'code' key
    """
    examples = []
    
    # Split on top-level function or class definitions (indented ones stay with their parent)
    lines = content.split('\n')
    current_example = []
    
    for line in lines:
        if (line.startswith('def ') or line.startswith('class ')) and current_example:
            # Start of a new function/class, save the previous one
            examples.append('\n'.join(current_example))
            current_example = [line]
        else:
            current_example.append(line)
    
    # Add the last example
    if current_example:
        examples.append('\n'.join(current_example))
    
    # If no examples were extracted, use the whole file as one example
    if not examples:
        examples = [content]
    
    return [{'code': example} for example in examples]
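
# Example (illustrative): top-level `def`/`class` lines each start a new chunk,
# so a two-function source string yields two examples, each wrapped as {"code": ...}:
#
#   src = "def a():\n    return 1\ndef b():\n    return 2\n"
#   split_python_file(src)
#   # -> [{'code': 'def a():\n    return 1'},
#   #     {'code': 'def b():\n    return 2\n'}]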

def create_dataset_from_examples(examples):
    """
    Create a dataset from code examples.
    
    Args:
        examples: List of code examples
        
    Returns:
        Dataset: Hugging Face dataset (or the local stand-in when the datasets package is unavailable)
    """
    return Dataset.from_list(examples)
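
# Example (illustrative): chaining the two helpers above turns raw source text
# into a single-column "code" dataset:
#
#   ds = create_dataset_from_examples(split_python_file("def f():\n    pass\n"))
#   ds.column_names   # -> ["code"]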

def validate_dataset_structure(dataset):
    """
    Validate that the dataset has the required structure for training.
    
    Args:
        dataset: Hugging Face dataset
        
    Returns:
        bool: True if valid, False otherwise
    """
    if 'code' not in dataset.column_names:
        add_log("Dataset missing 'code' column required for training", "ERROR")
        return False
    return True
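
# Example (illustrative): validation only checks for the presence of a "code"
# column; anything else is rejected and an ERROR is logged:
#
#   validate_dataset_structure(Dataset.from_list([{"code": "x = 1"}]))  # -> True
#   validate_dataset_structure(Dataset.from_list([{"text": "hi"}]))     # -> False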

def list_available_datasets():
    """
    List all available datasets in session state.
    
    Returns:
        list: List of dataset names
    """
    if 'datasets' in st.session_state:
        return list(st.session_state.datasets.keys())
    return []

def get_dataset_info(dataset_name):
    """
    Get information about a dataset.
    
    Args:
        dataset_name: Name of the dataset
        
    Returns:
        dict: Dataset information, or None if the dataset does not exist
    """
    if 'datasets' in st.session_state and dataset_name in st.session_state.datasets:
        return st.session_state.datasets[dataset_name]['info']
    return None

def get_dataset(dataset_name):
    """
    Get a dataset by name.
    
    Args:
        dataset_name: Name of the dataset
        
    Returns:
        DatasetDict: The dataset splits ('train' and 'validation'), or None if not found
    """
    if 'datasets' in st.session_state and dataset_name in st.session_state.datasets:
        return st.session_state.datasets[dataset_name]['data']
    return None
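
# Retrieval sketch (hedged): once a dataset has been processed, the helpers
# above look it up by name; "my_dataset" is a hypothetical name used only for
# illustration.
#
#   for name in list_available_datasets():
#       info = get_dataset_info(name)
#       print(name, info["size"], info["columns"])
#   train_split = get_dataset("my_dataset")["train"]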