Create data_preprocessing.py
data_preprocessing.py · ADDED · +100 -0
@@ -0,0 +1,100 @@
'''
This script outlines the data pre-processing work done: cleaning and tokenizing the text, and extracting text from images (OCR) using pytesseract.
The dataset in the repo is the processed one.
'''

!pip install -q datasets

from datasets import load_dataset

dataset = load_dataset("mo-mittal/reddit_political_subs", trust_remote_code=True)

# Install Tesseract OCR
!sudo apt install tesseract-ocr

# Install pytesseract
!pip install pytesseract

import pytesseract
from concurrent.futures import ThreadPoolExecutor

# This assumes 'dataset' is already loaded and contains the earlier data; the preprocessing was done on an earlier version of the current dataset.

#### You can also check how many CPU cores are available for the thread pool:

# import os
# num_cores = os.cpu_count()
# print(f"Number of available CPU cores: {num_cores}")

def ocr_image(index):
    try:
        # Retrieve the image and associated URL from the dataset.
        image_pil = dataset['train'][index]['image']
        url = dataset['train'][index]['url']

        # Only OCR rows whose 'image_text' is still empty and whose 'url' points to imgur.
        if (dataset['train'][index]['image_text'] == '') and ('imgur' in url):
            text = pytesseract.image_to_string(image_pil)
            print(f'Sublime! Processed img at {index}')
            return text
    except Exception as e:
        # Uncomment to print an error message for any kind of Exception.
        # print(f"Error processing image at index {index}: {e}")
        return None
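
# A quick sanity check (not part of the original run): try OCR on a single row
# before launching the full thread pool; index 0 is an arbitrary choice.
# sample_text = ocr_image(0)
# print(repr(sample_text))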

# Create a ThreadPoolExecutor to parallelize image processing.
num_workers = 8  # Adjust this number based on your machine's capabilities.
imgur_text = []

# Use ThreadPoolExecutor to create a pool of threads.
with ThreadPoolExecutor(max_workers=num_workers) as executor:
    # Use map to apply the 'ocr_image' function to each index.
    results = list(executor.map(ocr_image, range(len(dataset['train']))))

# Now 'results' contains the OCR results or None for each image.
# Filter out None values and add the text to the 'imgur_text' list.
imgur_text.extend(filter(None, results))

# 'imgur_text' now contains all the OCR results. This can take quite a while to process: about 7503 images and 12 hours on an 8-core CPU.
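
# Optional, not in the original script: given the ~12-hour runtime, it is worth
# persisting the raw OCR results before going further. The filename
# 'ocr_results.pkl' is an arbitrary choice.
import pickle

with open('ocr_results.pkl', 'wb') as f:
    pickle.dump(results, f)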

## for text cleaning

import pandas as pd

# Drop the (large) image column and put the remaining fields in a DataFrame.
df = pd.DataFrame(dataset['train'].remove_columns(['image']))
# df.head()

results_list = list(results)

# Write the OCR output back into the 'image_text' column.
for i, text in enumerate(results_list):
    if text is not None:
        df.loc[i, 'image_text'] = text

# df.to_csv('reddit_political_subs.csv', index=False)

import pandas as pd

# Reload the saved dataset from CSV; the cleaning below resumes from this checkpoint.
df = pd.read_csv('reddit_political_subs.csv')

import nltk
nltk.download('stopwords')

import re
from nltk.corpus import stopwords
import string

def clean_text(text):
    # Guard: pd.read_csv yields NaN (a float) for empty cells, which would crash re.sub.
    if not isinstance(text, str):
        return ''

    text = re.sub(r'\\n', ' ', text)    # drop literal '\n' sequences
    text = re.sub(r'\\x..', '', text)   # drop literal '\xNN' escape sequences
    text = re.sub(r'[@|\\]', '', text)  # drop stray '@', '|' and backslash characters

    # Lowercase before removing stop words so capitalized forms ('The', 'And', ...) are caught.
    text = text.lower()
    text = text.translate(str.maketrans('', '', string.punctuation))
    stop_words = set(stopwords.words('english'))
    text = ' '.join([word for word in text.split() if word not in stop_words])

    # Collapse any remaining runs of whitespace.
    text = ' '.join(text.split())

    return text
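
# Quick illustration (not from the original): what clean_text does to a sample string.
# print(clean_text('The quick\\n brown fox... @ JUMPED over!'))
# -> 'quick brown fox jumped'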

# Apply the cleaning to every row of 'image_text'.
df['image_text'] = df['image_text'].apply(clean_text)
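
# The header mentions tokenizing as part of the text work, but no tokenizer
# appears in this file. A minimal sketch, assuming NLTK's word tokenizer is
# acceptable; the 'tokens' column name is a hypothetical choice.
nltk.download('punkt')
from nltk.tokenize import word_tokenize

df['tokens'] = df['image_text'].apply(word_tokenize)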