""" AI Dataset Studio - Modern Web Scraping & Dataset Creation Platform A mini Scale AI for non-coders and vibe coders Features: - Intelligent web scraping with content extraction - Automated data cleaning and preprocessing - Interactive annotation tools - Template-based workflows for common ML tasks - High-quality dataset generation - Export to HuggingFace Hub and popular ML formats - Visual data quality metrics - No-code dataset creation workflows """ import gradio as gr import pandas as pd import numpy as np import json import re import requests from bs4 import BeautifulSoup from urllib.parse import urlparse, urljoin from datetime import datetime, timedelta import logging from typing import Dict, List, Tuple, Optional, Any from dataclasses import dataclass, asdict from pathlib import Path import uuid import hashlib import time from collections import defaultdict import io import zipfile # Optional imports with fallbacks try: from transformers import pipeline, AutoTokenizer, AutoModel from sentence_transformers import SentenceTransformer HAS_TRANSFORMERS = True except ImportError: HAS_TRANSFORMERS = False try: import nltk from nltk.tokenize import sent_tokenize, word_tokenize from nltk.corpus import stopwords HAS_NLTK = True except ImportError: HAS_NLTK = False try: from datasets import Dataset, DatasetDict HAS_DATASETS = True except ImportError: HAS_DATASETS = False # Configure logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) # Download NLTK data if available if HAS_NLTK: try: nltk.download('punkt', quiet=True) nltk.download('stopwords', quiet=True) nltk.download('averaged_perceptron_tagger', quiet=True) except: pass @dataclass class ScrapedItem: """Data class for scraped content""" id: str url: str title: str content: str metadata: Dict[str, Any] scraped_at: str word_count: int language: str = "en" quality_score: float = 0.0 labels: List[str] = None annotations: Dict[str, Any] = None def __post_init__(self): if self.labels is None: self.labels = [] if self.annotations is None: self.annotations = {} @dataclass class DatasetTemplate: """Template for dataset creation""" name: str description: str task_type: str # classification, ner, qa, summarization, etc. 
    required_fields: List[str]
    optional_fields: List[str]
    example_format: Dict[str, Any]
    instructions: str


class WebScraperEngine:
    """Advanced web scraping engine with smart content extraction"""

    def __init__(self):
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (compatible; AI-DatasetStudio/1.0; Research)',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
        })

        # Initialize AI models if available
        self.content_classifier = None
        self.quality_scorer = None
        self._load_models()

    def _load_models(self):
        """Load AI models for content analysis"""
        if not HAS_TRANSFORMERS:
            logger.warning("⚠️ Transformers not available, using rule-based methods")
            return

        try:
            # Content quality assessment
            self.quality_scorer = pipeline(
                "text-classification",
                model="martin-ha/toxic-comment-model",
                return_all_scores=True
            )
            logger.info("✅ Quality assessment model loaded")
        except Exception as e:
            logger.warning(f"⚠️ Could not load quality model: {e}")

    def scrape_url(self, url: str) -> Optional[ScrapedItem]:
        """Scrape a single URL and return structured data"""
        try:
            # Validate URL
            if not self._is_valid_url(url):
                raise ValueError("Invalid URL provided")

            # Fetch content
            response = self.session.get(url, timeout=15)
            response.raise_for_status()

            # Parse HTML
            soup = BeautifulSoup(response.content, 'html.parser')

            # Extract structured data
            title = self._extract_title(soup)
            content = self._extract_content(soup)
            metadata = self._extract_metadata(soup, response)

            # Create scraped item
            item = ScrapedItem(
                id=str(uuid.uuid4()),
                url=url,
                title=title,
                content=content,
                metadata=metadata,
                scraped_at=datetime.now().isoformat(),
                word_count=len(content.split()),
                quality_score=self._assess_quality(content)
            )
            return item

        except Exception as e:
            logger.error(f"Failed to scrape {url}: {e}")
            return None

    def batch_scrape(self, urls: List[str], progress_callback=None) -> List[ScrapedItem]:
        """Scrape multiple URLs with progress tracking"""
        results = []
        total = len(urls)

        for i, url in enumerate(urls):
            if progress_callback:
                progress_callback(i / total, f"Scraping {i+1}/{total}: {url[:50]}...")

            item = self.scrape_url(url)
            if item:
                results.append(item)

            # Rate limiting
            time.sleep(1)

        return results

    def _is_valid_url(self, url: str) -> bool:
        """Validate URL format and safety"""
        try:
            parsed = urlparse(url)
            return parsed.scheme in ['http', 'https'] and parsed.netloc
        except:
            return False

    def _extract_title(self, soup: BeautifulSoup) -> str:
        """Extract page title"""
        # Try multiple selectors
        selectors = [
            'meta[property="og:title"]',
            'meta[name="twitter:title"]',
            'title',
            'h1'
        ]

        for selector in selectors:
            element = soup.select_one(selector)
            if element:
                if element.name == 'meta':
                    return element.get('content', '').strip()
                else:
                    return element.get_text().strip()

        return "Untitled"

    def _extract_content(self, soup: BeautifulSoup) -> str:
        """Extract main content using multiple strategies"""
        # Remove unwanted elements
        for element in soup(['script', 'style', 'nav', 'header', 'footer', 'aside']):
            element.decompose()

        # Try content-specific selectors
        content_selectors = [
            'article', 'main', '.content', '.post-content',
            '.entry-content', '.article-body', '[role="main"]'
        ]

        for selector in content_selectors:
            element = soup.select_one(selector)
            if element:
                text = element.get_text(separator=' ', strip=True)
                if len(text) > 200:
                    return self._clean_text(text)

        # Fallback to body
        body = soup.find('body')
        if body:
            return self._clean_text(body.get_text(separator=' ', strip=True))

        return self._clean_text(soup.get_text(separator=' ', strip=True))

    def _extract_metadata(self, soup: BeautifulSoup, response) -> Dict[str, Any]:
        """Extract metadata from page"""
        metadata = {
            'domain': urlparse(response.url).netloc,
            'status_code': response.status_code,
            'content_type': response.headers.get('content-type', ''),
            'extracted_at': datetime.now().isoformat()
        }

        # Extract meta tags
        meta_tags = ['description', 'keywords', 'author', 'published_time']
        for tag in meta_tags:
            element = soup.find('meta', attrs={'name': tag}) or soup.find('meta', attrs={'property': f'article:{tag}'})
            if element:
                metadata[tag] = element.get('content', '')

        return metadata

    def _clean_text(self, text: str) -> str:
        """Clean extracted text"""
        # Remove extra whitespace
        text = re.sub(r'\s+', ' ', text)

        # Remove common patterns
        patterns = [
            r'Subscribe.*?newsletter',
            r'Click here.*?more',
            r'Advertisement',
            r'Share this.*?social',
            r'Follow us on.*?media'
        ]
        for pattern in patterns:
            text = re.sub(pattern, '', text, flags=re.IGNORECASE)

        return text.strip()

    def _assess_quality(self, content: str) -> float:
        """Assess content quality (0-1 score)"""
        if not content:
            return 0.0

        score = 0.0

        # Length check
        word_count = len(content.split())
        if word_count >= 50:
            score += 0.3
        elif word_count >= 20:
            score += 0.1

        # Structure check (sentences)
        sentence_count = len(re.split(r'[.!?]+', content))
        if sentence_count >= 3:
            score += 0.2

        # Language quality (basic)
        if re.search(r'[A-Z][a-z]+', content):  # Proper capitalization
            score += 0.2
        if not re.search(r'[^\w\s]', content[:100]):  # No weird characters at start
            score += 0.1

        # Readability (simple check)
        avg_word_length = np.mean([len(word) for word in content.split()])
        if 3 <= avg_word_length <= 8:
            score += 0.2

        return min(score, 1.0)


class DataProcessor:
    """Advanced data processing and cleaning pipeline"""

    def __init__(self):
        self.language_detector = None
        self.sentiment_analyzer = None
        self.ner_model = None
        self._load_models()

    def _load_models(self):
        """Load NLP models for processing"""
        if not HAS_TRANSFORMERS:
            return

        try:
            # Sentiment analysis
            self.sentiment_analyzer = pipeline(
                "sentiment-analysis",
                model="cardiffnlp/twitter-roberta-base-sentiment-latest"
            )

            # Named Entity Recognition
            self.ner_model = pipeline(
                "ner",
                model="dbmdz/bert-large-cased-finetuned-conll03-english",
                aggregation_strategy="simple"
            )
            logger.info("✅ NLP models loaded successfully")
        except Exception as e:
            logger.warning(f"⚠️ Could not load NLP models: {e}")

    def process_items(self, items: List[ScrapedItem], processing_options: Dict[str, bool]) -> List[ScrapedItem]:
        """Process scraped items with various enhancement options"""
        processed_items = []

        for item in items:
            processed_item = self._process_single_item(item, processing_options)
            if processed_item:
                processed_items.append(processed_item)

        return processed_items

    def _process_single_item(self, item: ScrapedItem, options: Dict[str, bool]) -> Optional[ScrapedItem]:
        """Process a single item"""
        try:
            # Clean content
            if options.get('clean_text', True):
                item.content = self._clean_text_advanced(item.content)

            # Filter by quality
            if options.get('quality_filter', True) and item.quality_score < 0.3:
                return None

            # Add sentiment analysis
            if options.get('add_sentiment', False) and self.sentiment_analyzer:
                sentiment = self._analyze_sentiment(item.content)
                item.metadata['sentiment'] = sentiment

            # Add named entities
            if options.get('extract_entities', False) and self.ner_model:
                entities = self._extract_entities(item.content)
                item.metadata['entities'] = entities

            # Add language detection
            if options.get('detect_language', True):
                item.language = self._detect_language(item.content)

            return item

        except Exception as e:
            logger.error(f"Error processing item {item.id}: {e}")
            return None

    def _clean_text_advanced(self, text: str) -> str:
        """Advanced text cleaning"""
        # Remove URLs
        text = re.sub(r'http\S+|www\.\S+', '', text)

        # Remove email addresses
        text = re.sub(r'\S+@\S+', '', text)

        # Remove excessive punctuation
        text = re.sub(r'[!?]{2,}', '!', text)
        text = re.sub(r'\.{3,}', '...', text)

        # Normalize whitespace
        text = re.sub(r'\s+', ' ', text)

        # Remove very short paragraphs (likely navigation)
        paragraphs = text.split('\n')
        paragraphs = [p.strip() for p in paragraphs if len(p.strip()) > 20]

        return '\n'.join(paragraphs).strip()

    def _analyze_sentiment(self, text: str) -> Dict[str, Any]:
        """Analyze sentiment of text"""
        try:
            # Truncate text for model limits
            text_sample = text[:512]
            result = self.sentiment_analyzer(text_sample)[0]
            return {
                'label': result['label'],
                'score': result['score']
            }
        except:
            return {'label': 'UNKNOWN', 'score': 0.0}

    def _extract_entities(self, text: str) -> List[Dict[str, Any]]:
        """Extract named entities"""
        try:
            # Truncate text for model limits
            text_sample = text[:512]
            entities = self.ner_model(text_sample)
            return [
                {
                    'text': ent['word'],
                    'label': ent['entity_group'],
                    'confidence': ent['score']
                }
                for ent in entities
            ]
        except:
            return []

    def _detect_language(self, text: str) -> str:
        """Simple language detection"""
        # Basic heuristic - could be enhanced with proper language detection
        if re.search(r'[а-яё]', text.lower()):
            return 'ru'
        elif re.search(r'[ñáéíóúü]', text.lower()):
            return 'es'
        elif re.search(r'[àâäçéèêëïîôöùûüÿ]', text.lower()):
            return 'fr'
        else:
            return 'en'


class AnnotationEngine:
    """Interactive annotation tools for dataset creation"""

    def __init__(self):
        self.templates = self._load_templates()

    def _load_templates(self) -> Dict[str, DatasetTemplate]:
        """Load predefined dataset templates"""
        templates = {
            'text_classification': DatasetTemplate(
                name="Text Classification",
                description="Classify text into predefined categories",
                task_type="classification",
                required_fields=["text", "label"],
                optional_fields=["confidence", "metadata"],
                example_format={"text": "Sample text", "label": "positive"},
                instructions="Label each text with the appropriate category"
            ),
            'sentiment_analysis': DatasetTemplate(
                name="Sentiment Analysis",
                description="Analyze emotional tone of text",
                task_type="classification",
                required_fields=["text", "sentiment"],
                optional_fields=["confidence", "aspects"],
                example_format={"text": "I love this!", "sentiment": "positive"},
                instructions="Classify the sentiment as positive, negative, or neutral"
            ),
            'named_entity_recognition': DatasetTemplate(
                name="Named Entity Recognition",
                description="Identify and classify named entities in text",
                task_type="ner",
                required_fields=["text", "entities"],
                optional_fields=["metadata"],
                example_format={
                    "text": "John works at OpenAI in San Francisco",
                    "entities": [
                        {"text": "John", "label": "PERSON", "start": 0, "end": 4},
                        {"text": "OpenAI", "label": "ORG", "start": 14, "end": 20}
                    ]
                },
                instructions="Mark all named entities (people, organizations, locations, etc.)"
            ),
            'question_answering': DatasetTemplate(
                name="Question Answering",
                description="Create question-answer pairs from text",
                task_type="qa",
                required_fields=["context", "question", "answer"],
                optional_fields=["answer_start", "metadata"],
                example_format={
                    "context": "The capital of France is Paris.",
                    "question": "What is the capital of France?",
                    "answer": "Paris"
                },
                instructions="Create meaningful questions and provide accurate answers"
            ),
            'summarization': DatasetTemplate(
                name="Text Summarization",
                description="Create concise summaries of longer texts",
                task_type="summarization",
                required_fields=["text", "summary"],
                optional_fields=["summary_type", "length"],
                example_format={
                    "text": "Long article text...",
                    "summary": "Brief summary of the main points"
                },
                instructions="Write clear, concise summaries capturing key information"
            )
        }
        return templates

    def create_annotation_interface(self, template_name: str, items: List[ScrapedItem]) -> Dict[str, Any]:
        """Create annotation interface for specific template"""
        template = self.templates.get(template_name)
        if not template:
            raise ValueError(f"Unknown template: {template_name}")

        # Prepare data for annotation
        annotation_data = []
        for item in items:
            annotation_data.append({
                'id': item.id,
                'text': item.content[:1000],  # Truncate for UI
                'title': item.title,
                'url': item.url,
                'annotations': {}
            })

        return {
            'template': template,
            'data': annotation_data,
            'progress': 0,
            'completed': 0
        }


class DatasetExporter:
    """Export datasets in various formats for ML frameworks"""

    def __init__(self):
        self.supported_formats = [
            'huggingface_datasets', 'json', 'csv', 'parquet',
            'jsonl', 'pytorch', 'tensorflow'
        ]

    def export_dataset(self, items: List[ScrapedItem], template: DatasetTemplate,
                       export_format: str, annotations: Dict[str, Any] = None) -> str:
        """Export annotated dataset in specified format"""
        try:
            # Prepare dataset
            dataset_data = self._prepare_dataset_data(items, template, annotations)

            # Export based on format
            if export_format == 'huggingface_datasets':
                return self._export_huggingface(dataset_data, template)
            elif export_format == 'json':
                return self._export_json(dataset_data)
            elif export_format == 'csv':
                return self._export_csv(dataset_data)
            elif export_format == 'jsonl':
                return self._export_jsonl(dataset_data)
            else:
                raise ValueError(f"Unsupported format: {export_format}")

        except Exception as e:
            logger.error(f"Export failed: {e}")
            raise

    def _prepare_dataset_data(self, items: List[ScrapedItem], template: DatasetTemplate,
                              annotations: Dict[str, Any] = None) -> List[Dict[str, Any]]:
        """Prepare data according to template format"""
        dataset_data = []

        for item in items:
            # Base data from scraped item
            data_point = {
                'text': item.content,
                'title': item.title,
                'url': item.url,
                'metadata': item.metadata
            }

            # Add annotations if available
            if annotations and item.id in annotations:
                item_annotations = annotations[item.id]
                data_point.update(item_annotations)

            # Format according to template
            formatted_point = self._format_for_template(data_point, template)
            if formatted_point:
                dataset_data.append(formatted_point)

        return dataset_data

    def _format_for_template(self, data_point: Dict[str, Any], template: DatasetTemplate) -> Dict[str, Any]:
        """Format data point according to template requirements"""
        formatted = {}

        # Ensure required fields are present
        for field in template.required_fields:
            if field in data_point:
                formatted[field] = data_point[field]
            elif field == 'text' and 'content' in data_point:
                formatted[field] = data_point['content']
            else:
                # Skip this data point if required field is missing
                return None

        # Add optional fields if present
        for field in template.optional_fields:
            if field in data_point:
                formatted[field] = data_point[field]

        return formatted

    def _export_huggingface(self, dataset_data: List[Dict[str, Any]], template: DatasetTemplate) -> str:
        """Export as HuggingFace Dataset"""
        if not HAS_DATASETS:
            raise ImportError("datasets library not available")

        try:
            # Create dataset
            dataset = Dataset.from_list(dataset_data)

            # Create dataset card
            card_content = f"""# {template.name} Dataset

## Description
{template.description}

## Task Type
{template.task_type}

## Format
{template.example_format}

## Instructions
{template.instructions}

## Statistics
- Total samples: {len(dataset_data)}
- Created: {datetime.now().isoformat()}

## Usage
```python
from datasets import load_dataset
dataset = load_dataset('path/to/dataset')
```
"""

            # Save dataset
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            dataset_name = f"{template.name.lower().replace(' ', '_')}_{timestamp}"

            # Save locally (would push to Hub in production)
            dataset.save_to_disk(dataset_name)

            # Create info file
            with open(f"{dataset_name}/README.md", "w") as f:
                f.write(card_content)

            return dataset_name

        except Exception as e:
            logger.error(f"HuggingFace export failed: {e}")
            raise

    def _export_json(self, dataset_data: List[Dict[str, Any]]) -> str:
        """Export as JSON file"""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"dataset_{timestamp}.json"
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(dataset_data, f, indent=2, ensure_ascii=False)
        return filename

    def _export_csv(self, dataset_data: List[Dict[str, Any]]) -> str:
        """Export as CSV file"""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"dataset_{timestamp}.csv"
        df = pd.DataFrame(dataset_data)
        df.to_csv(filename, index=False)
        return filename

    def _export_jsonl(self, dataset_data: List[Dict[str, Any]]) -> str:
        """Export as JSONL file"""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"dataset_{timestamp}.jsonl"
        with open(filename, 'w', encoding='utf-8') as f:
            for item in dataset_data:
                f.write(json.dumps(item, ensure_ascii=False) + '\n')
        return filename
def create_modern_interface():
    """Create modern, intuitive interface for AI Dataset Studio"""
    # Initialize the studio
    studio = DatasetStudio()

    # Custom CSS for modern appearance
    custom_css = """
    .gradio-container { max-width: 1400px; margin: auto; font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif; }
    .studio-header { background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 2rem;
                     border-radius: 15px; margin-bottom: 2rem; text-align: center; box-shadow: 0 8px 32px rgba(0,0,0,0.1); }
    .workflow-card { background: #f8f9ff; border: 2px solid #e1e5ff; border-radius: 12px; padding: 1.5rem;
                     margin: 1rem 0; transition: all 0.3s ease; }
    .workflow-card:hover { border-color: #667eea; box-shadow: 0 4px 20px rgba(102, 126, 234, 0.1); }
    .step-header { display: flex; align-items: center; margin-bottom: 1rem; font-size: 1.2em; font-weight: 600; color: #4c51bf; }
    .step-number { background: #667eea; color: white; border-radius: 50%; width: 30px; height: 30px;
                   display: flex; align-items: center; justify-content: center; margin-right: 1rem; font-weight: bold; }
    .feature-grid { display: grid; grid-template-columns: repeat(auto-fit, minmax(300px, 1fr)); gap: 1rem; margin: 1rem 0; }
    .feature-item { background: white; border: 1px solid #e2e8f0; border-radius: 8px; padding: 1rem; text-align: center; }
    .stat-card { background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%); color: white; padding: 1rem;
                 border-radius: 10px; text-align: center; margin: 0.5rem; }
    .progress-bar { background: #e2e8f0; border-radius: 10px; height: 8px; overflow: hidden; }
    .progress-fill { background: linear-gradient(90deg, #667eea 0%, #764ba2 100%); height: 100%; transition: width 0.3s ease; }
    .template-card { border: 2px solid #e2e8f0; border-radius: 10px; padding: 1rem; margin: 0.5rem;
                     cursor: pointer; transition: all 0.3s ease; }
    .template-card:hover { border-color: #667eea; transform: translateY(-2px); box-shadow: 0 4px 12px rgba(0,0,0,0.1); }
    .template-selected { border-color: #667eea; background: #f7fafc; }
    .export-option { background: #f7fafc; border: 1px solid #e2e8f0; border-radius: 8px; padding: 1rem;
                     margin: 0.5rem 0; cursor: pointer; }
    .export-option:hover { background: #edf2f7; border-color: #cbd5e0; }
    .success-message { background: #f0fff4; border: 1px solid #9ae6b4; color: #276749; padding: 1rem;
                       border-radius: 8px; margin: 1rem 0; }
    .error-message { background: #fed7d7; border: 1px solid #feb2b2; color: #c53030; padding: 1rem;
                     border-radius: 8px; margin: 1rem 0; }
    """

    # Project state for UI
    project_state = gr.State({})

    with gr.Blocks(css=custom_css, title="AI Dataset Studio", theme=gr.themes.Soft()) as interface:
        # Header
        gr.HTML("""
        <div class="studio-header">
            <h1>AI Dataset Studio</h1>
            <p>Create high-quality training datasets without coding - Your personal Scale AI</p>
            <p>Web Scraping → Data Processing → Annotation → ML-Ready Datasets</p>
        </div>
        """)
        # Step 1: Project setup - choose the type of task
        gr.HTML("""
        <div class="workflow-card">
            <p>Define your dataset project and choose the type of AI task you're building for.</p>
            <div class="feature-grid">
                <div class="template-card">Categorize text into predefined labels<br>
                    <small>Great for: Spam detection, topic classification</small></div>
                <div class="template-card">Analyze emotional tone and opinions<br>
                    <small>Great for: Review analysis, social media monitoring</small></div>
                <div class="template-card">Identify people, places, organizations<br>
                    <small>Great for: Information extraction, content tagging</small></div>
            </div>
        </div>
        """)

        # Step 2: Data collection
        gr.HTML("""
        <div class="workflow-card">
            <p>Provide URLs to scrape content automatically. Our AI will extract clean, structured text.</p>
        </div>
        """)

        # Step 3: Data processing
        gr.HTML("""
        <div class="workflow-card">
            <p>Configure how to clean and enhance your scraped data with AI-powered analysis.</p>
        </div>
        """)

        # Step 4: Review
        gr.HTML("""
        <div class="workflow-card">
            <p>Review your processed data before annotation or export.</p>
        </div>
        """)

        # Step 5: Export
        gr.HTML("""
        <div class="workflow-card">
            <p>Export your dataset in various formats for different ML frameworks and platforms.</p>
            <div class="export-option">HuggingFace Datasets - Ready for transformers library</div>
            <div class="export-option">JSON - Universal format for any framework</div>
            <div class="export-option">CSV - Easy analysis in Excel/Pandas</div>
        </div>
        """)
        # Summary HTML shown after scraping and processing runs
        # (helper names here are illustrative placeholders)
        def scraping_summary(success_count: int, urls: List[str]) -> str:
            return f"""
            <div class="stat-card">{success_count} items collected</div>
            <div class="stat-card">{len(urls) - success_count} failed</div>
            """

        def processing_summary(processed_count: int, stats: Dict[str, Any]) -> str:
            return f"""
            <div class="stat-card">{processed_count} items processed</div>
            <div class="stat-card">Avg Quality: {stats.get('avg_quality_score', 0)}</div>
            <div class="stat-card">Avg Words: {stats.get('avg_word_count', 0)}</div>
            """

    return interface
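

# Minimal entry point sketch, assuming the standard local/Spaces launch flow for a Gradio app.
if __name__ == "__main__":
    app = create_modern_interface()
    app.launch(show_error=True)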