Update app.py
app.py CHANGED
@@ -3,30 +3,58 @@ import pandas as pd
 import re
 from groq import Groq
 import gradio as gr
+from nltk.corpus import stopwords
+# Removed tqdm import due to compatibility issues
+
+# Set stopwords for text cleaning (you can hard-code them if you face download issues)
+STOPWORDS = set([
+    "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you", "your", "yours", "yourself",
+    "yourselves", "he", "him", "his", "himself", "she", "her", "hers", "herself", "it", "its", "itself",
+    "they", "them", "their", "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
+    "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has", "had", "having", "do", "does",
+    "did", "doing", "a", "an", "the", "and", "but", "if", "or", "because", "as", "until", "while", "of", "at", "by",
+    "for", "with", "about", "against", "between", "into", "through", "during", "before", "after", "above", "below",
+    "to", "from", "up", "down", "in", "out", "on", "off", "over", "under", "again", "further", "then", "once", "here",
+    "there", "when", "where", "why", "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such",
+    "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t", "can", "will", "just", "don", "should",
+    "now", "d", "ll", "m", "o", "re", "ve", "y", "ain", "aren", "couldn", "didn", "doesn", "hadn", "hasn", "haven", "isn",
+    "ma", "mightn", "mustn", "needn", "shan", "shouldn", "wasn", "weren", "won", "wouldn"
+])
 
 # Set Groq API Key
 GROQ_API_KEY = "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"
 client = Groq(api_key=GROQ_API_KEY)
 
+# Function: Generate Missing Data Report
+def missing_data_report(data):
+    missing_report = data.isnull().sum()
+    total_missing = missing_report.sum()
+    return f"Missing Data Report:\n\n{missing_report}\n\nTotal Missing Values: {total_missing}"
+
 # Function: Clean Dataset
-def clean_data(data):
+def clean_data(data, lowercase=True, remove_punctuation=True, remove_stopwords=False):
     # Fill missing values
-    data.fillna(method=…
-    data.fillna(method=…
-
+    data.fillna(method="ffill", inplace=True)
+    data.fillna(method="bfill", inplace=True)
+
     # Remove duplicates
     data = data.drop_duplicates()
-
+
     # Normalize and clean text columns
-    for col in data.select_dtypes(include=[…
-        …
-        …
+    for col in data.select_dtypes(include=["object"]).columns:
+        if lowercase:
+            data[col] = data[col].str.lower()
+        if remove_punctuation:
+            data[col] = data[col].apply(lambda x: re.sub(r"[^\w\s]", "", str(x)))
+        if remove_stopwords:
+            data[col] = data[col].apply(lambda x: " ".join([word for word in str(x).split() if word not in STOPWORDS]))
+
     return data
 
 # Function: Chunk Text
 def chunk_text(text, max_length=100):
     words = text.split()
-    return […
+    return [" ".join(words[i : i + max_length]) for i in range(0, len(words), max_length)]
 
 # Function: Generate Embeddings
 def generate_embeddings(chunk):
@@ -38,40 +66,60 @@ def generate_embeddings(chunk):
     return chat_completion.choices[0].message.content
 
 # Main Function: Process Data
-def process_dataset(file):
+def process_dataset(file, chunk_size=100, lowercase=True, remove_punctuation=True, remove_stopwords=False):
     # Load data
-    data = pd.read_csv(file)
-
+    data = pd.read_csv(file.name)
+
+    # Generate missing data report
+    missing_report = missing_data_report(data)
+
     # Step 1: Clean data
-    cleaned_data = clean_data(data)
-
-    # Step 2: Create chunks
-    if …
-        cleaned_data[…
+    cleaned_data = clean_data(data, lowercase, remove_punctuation, remove_stopwords)
+
+    # Step 2: Create chunks (removed tqdm)
+    if "text_column" in cleaned_data.columns:
+        cleaned_data["chunks"] = cleaned_data["text_column"].apply(lambda x: chunk_text(x, max_length=chunk_size))
     else:
-        return "Error: 'text_column' not found in the dataset."
+        return "Error: 'text_column' not found in the dataset.", None, None
 
-    # Step 3: Generate embeddings
-    cleaned_data['embeddings'] = cleaned_data['chunks'].apply(lambda chunks: [generate_embeddings(chunk) for chunk in chunks])
-
+    # Step 3: Generate embeddings (removed tqdm)
+    cleaned_data["embeddings"] = cleaned_data["chunks"].apply(
+        lambda chunks: [generate_embeddings(chunk) for chunk in chunks]
+    )
+
     # Save cleaned data with embeddings
-    …
-    …
+    output_file = "processed_data.csv"
+    cleaned_data.to_csv(output_file, index=False)
+
+    # Display sample embeddings
+    embedding_sample = cleaned_data["embeddings"].head(5).to_string()
+
+    return missing_report, embedding_sample, output_file
 
 # Gradio UI
-def gradio_interface(file):
-    …
-    …
+def gradio_interface(file, chunk_size, lowercase, remove_punctuation, remove_stopwords):
+    missing_report, embedding_sample, output_file = process_dataset(
+        file, chunk_size, lowercase, remove_punctuation, remove_stopwords
+    )
+    return missing_report, embedding_sample, output_file
 
 # Gradio App
 ui = gr.Interface(
     fn=gradio_interface,
-    inputs=…
-    …
-    …
-    …
-    …
-    …
+    inputs=[
+        gr.File(label="Upload CSV Dataset"),
+        gr.Slider(50, 500, step=50, value=100, label="Chunk Size (words)"),
+        gr.Checkbox(label="Convert Text to Lowercase", value=True),
+        gr.Checkbox(label="Remove Punctuation", value=True),
+        gr.Checkbox(label="Remove Stopwords", value=False),
+    ],
+    outputs=[
+        gr.Text(label="Missing Data Report"),
+        gr.Text(label="Embedding Sample"),
+        gr.File(label="Download Processed Dataset"),
+    ],
+    title="Enhanced Data Cleaning and Embedding Tool",
+    description="Upload your dataset to clean, chunk, and generate embeddings using Llama LLM with Groq API.",
 )
 
 # Launch App
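With the stopword list hard-coded, the new `from nltk.corpus import stopwords` import is never actually used and could be dropped. If the NLTK list were used instead, the corpus has to be downloaded first; a minimal sketch, assuming network access at startup:

```python
import nltk
from nltk.corpus import stopwords

nltk.download("stopwords", quiet=True)  # one-time corpus fetch at startup
STOPWORDS = set(stopwords.words("english"))
```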
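The Groq API key is committed in plain text, where anyone browsing the Space can copy it. Hugging Face Spaces exposes configured secrets to the app as environment variables, so a safer sketch (assuming a Space secret named `GROQ_API_KEY` has been set) is:

```python
import os
from groq import Groq

# Read the key from the environment rather than hard-coding it in the source.
api_key = os.environ.get("GROQ_API_KEY")
if not api_key:
    raise RuntimeError("GROQ_API_KEY is not set; add it as a Space secret.")
client = Groq(api_key=api_key)
```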
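The new `missing_data_report` is a thin wrapper over `isnull().sum()`; a quick check of what it reports on a toy frame:

```python
import pandas as pd

df = pd.DataFrame({"text_column": ["hello", None, "world"],
                   "score": [1.0, 2.0, None]})
print(df.isnull().sum().to_dict())   # {'text_column': 1, 'score': 1}
print(int(df.isnull().sum().sum()))  # 2 missing values in total
```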
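One caution on the fill step: the `method` keyword to `fillna` has been deprecated since pandas 2.1 and is slated for removal, so these lines can warn or fail on a freshly built environment. A sketch of an equivalent fill without the deprecated keyword (and without mutating the caller's frame in place):

```python
def fill_missing(data):
    # Forward-fill, then back-fill anything still missing at the start;
    # returns a new DataFrame instead of filling in place.
    return data.ffill().bfill()
```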
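The rewritten `chunk_text` regroups whitespace-split words into fixed-size windows; a quick usage example:

```python
def chunk_text(text, max_length=100):
    words = text.split()
    return [" ".join(words[i : i + max_length]) for i in range(0, len(words), max_length)]

print(chunk_text("one two three four five six seven", max_length=3))
# ['one two three', 'four five six', 'seven']
```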
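The body of `generate_embeddings` lies outside the changed hunks, but its visible return value, `chat_completion.choices[0].message.content`, shows it calls Groq's chat-completions endpoint rather than an embeddings API, so the stored "embeddings" are model-generated text, not numeric vectors. A minimal sketch consistent with that return expression (the prompt and model name here are assumptions, not taken from the commit):

```python
def generate_embeddings(chunk):
    # Hypothetical body: the real prompt and model are not shown in this diff.
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": f"Represent this text for retrieval: {chunk}"}],
        model="llama3-8b-8192",  # assumed model name
    )
    return chat_completion.choices[0].message.content
```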
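`process_dataset` now reads the upload with `pd.read_csv(file.name)`, which assumes `gr.File` delivers a tempfile-like object with a `.name` attribute; newer Gradio versions may instead pass a plain filepath string. A defensive sketch that accepts either form:

```python
import pandas as pd

def load_uploaded_csv(file):
    # gr.File may pass a str path (newer Gradio) or a tempfile-like
    # object with a .name attribute (older Gradio).
    path = file if isinstance(file, str) else file.name
    return pd.read_csv(path)
```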
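The hunk ends at the `# Launch App` comment, so the launch call itself is not shown; the usual Gradio pattern (not taken from this commit) is:

```python
if __name__ == "__main__":
    ui.launch()
```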