Column                     Type           Values / Range
task_type                  stringclasses  4 values
code_task                  stringclasses  15 values
start_line                 int64          4 – 1.79k
end_line                   int64          4 – 1.8k
before                     stringlengths  79 – 76.1k
between                    stringlengths  17 – 806
after                      stringlengths  2 – 72.6k
reason_categories_output   stringlengths  2 – 2.24k
horizon_categories_output  stringlengths  83 – 3.99k
reason_freq_analysis       stringclasses  150 values
horizon_freq_analysis      stringlengths  23 – 185
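Each record pairs a Python source file, split into `before` / `between` / `after` line lists, with dependency annotations for the held-out `between` span (lines `start_line`..`end_line`). Below is a minimal sketch of loading one record and reassembling the file; the dataset path and split name are placeholders, and only the field names are taken from the schema above.

```python
# Sketch only: "path/to/dataset" and the "train" split are hypothetical;
# field names come from the column listing above.
import ast
from datasets import load_dataset

ds = load_dataset("path/to/dataset", split="train")
row = ds[0]
print(row["task_type"], row["code_task"], row["start_line"], row["end_line"])

# before/between/after are stringified lists of source lines; together they
# appear to reassemble the whole file, with `between` covering the
# infilling target at lines start_line..end_line.
lines = (ast.literal_eval(row["before"])
         + ast.literal_eval(row["between"])
         + ast.literal_eval(row["after"]))
print("\n".join(lines))
```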
infilling_python
Image_Filtering
94
96
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):']
[' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum']
['', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 94 is imported at line 1 and has a Long-Range dependency. Variable 'image' used at line 94 is defined at line 93 and has a Short-Range dependency. Library 'np' used at line 95 is imported at line 1 and has a Long-Range dependency. Variable 'spectrum' used at line 95 is defined at line 94 and has a Short-Range dependency. Variable 'log_spectrum' used at line 96 is defined at line 95 and has a Short-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 3}
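The `horizon_freq_analysis` field appears to be a tally of the dependency sentences in `horizon_categories_output`. A small illustrative sketch (not part of any dataset tooling) that reproduces the counts for the record above; the regex is an assumption about the sentence template used in the annotations:

```python
# Sketch: derive {'Library Long-Range': 2, 'Variable Short-Range': 3} from the
# row's horizon_categories_output text. The regex is an assumed match for the
# annotation sentence template, not a documented format.
import re
from collections import Counter

text = (
    "Library 'np' used at line 94 is imported at line 1 and has a Long-Range dependency. "
    "Variable 'image' used at line 94 is defined at line 93 and has a Short-Range dependency. "
    "Library 'np' used at line 95 is imported at line 1 and has a Long-Range dependency. "
    "Variable 'spectrum' used at line 95 is defined at line 94 and has a Short-Range dependency. "
    "Variable 'log_spectrum' used at line 96 is defined at line 95 and has a Short-Range dependency."
)

pattern = re.compile(
    r"(Library|Variable|Function|Class) '[^']*' used at line \d+ "
    r".*?has a (Long|Short)-Range dependency"
)
freq = Counter(f"{kind} {rng}-Range" for kind, rng in pattern.findall(text))
print(dict(freq))  # {'Library Long-Range': 2, 'Variable Short-Range': 3}
```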
infilling_python
Image_Filtering
95
96
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))']
[' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum']
['', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 95 is imported at line 1 and has a Long-Range dependency. Variable 'spectrum' used at line 95 is defined at line 94 and has a Short-Range dependency. Variable 'log_spectrum' used at line 96 is defined at line 95 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
infilling_python
Image_Filtering
99
99
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)']
['spectrum_B = compute_fourier_magnitude(img_b_gray)']
['spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Function 'compute_fourier_magnitude' used at line 99 is defined at line 93 and has a Short-Range dependency. Variable 'img_b_gray' used at line 99 is defined at line 45 and has a Long-Range dependency.
{}
{'Function Short-Range': 1, 'Variable Long-Range': 1}
infilling_python
Image_Filtering
100
100
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)']
['spectrum_blurred_B = compute_fourier_magnitude(blur_b)']
['spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Function 'compute_fourier_magnitude' used at line 100 is defined at line 93 and has a Short-Range dependency. Variable 'blur_b' used at line 100 is defined at line 61 and has a Long-Range dependency.
{}
{'Function Short-Range': 1, 'Variable Long-Range': 1}
infilling_python
Image_Filtering
101
101
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)']
['spectrum_A_blur_A = compute_fourier_magnitude(a_diff)']
['spectrum_C = compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Function 'compute_fourier_magnitude' used at line 101 is defined at line 93 and has a Short-Range dependency. Variable 'a_diff' used at line 101 is defined at line 63 and has a Long-Range dependency.
{}
{'Function Short-Range': 1, 'Variable Long-Range': 1}
infilling_python
Image_Filtering
102
102
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)']
['spectrum_C = compute_fourier_magnitude(img_c)']
['print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Function 'compute_fourier_magnitude' used at line 102 is defined at line 93 and has a Short-Range dependency. Variable 'img_c' used at line 102 is defined at line 64 and has a Long-Range dependency.
{}
{'Function Short-Range': 1, 'Variable Long-Range': 1}
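The infilled statement in this record, spectrum_C = compute_fourier_magnitude(img_c), applies the task's log-magnitude helper to the hybrid image. A minimal self-contained sketch of the same computation, with a random array standing in for img_c (an assumption made purely for illustration):

import numpy as np

img = np.random.rand(64, 64)  # stand-in for the img_c hybrid image
spectrum = np.abs(np.fft.fftshift(np.fft.fft2(img)))  # centered magnitude spectrum
log_spectrum = np.log(1 + spectrum)                   # log(1 + x) compresses the dynamic range
print(log_spectrum.shape)  # (64, 64)

The log scaling matters because raw FFT magnitudes span several orders of magnitude; without it, only the DC peak would be visible when the spectrum is plotted.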
infilling_python
Image_Filtering
114
114
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)']
['B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)']
['', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 114 is imported at line 2 and has a Long-Range dependency. Variable 'orange' used at line 114 is defined at line 112 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
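The infilled statement here is B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5). Worth noting: because an explicit output size (256, 256) is supplied, OpenCV takes the output shape from dsize and the fx/fy scale factors have no effect. A small sketch, using a synthetic array as a stand-in for the orange image (an assumption, since the file may not be available):

import cv2
import numpy as np

orange = (np.random.rand(512, 512, 3) * 255).astype(np.uint8)  # stand-in for cv2.imread('./orange.jpeg')

# With a non-zero dsize, fx and fy are ignored, so these two calls are equivalent.
b_with_factors = cv2.resize(orange, (256, 256), fx=0.5, fy=0.5)
b_plain = cv2.resize(orange, (256, 256))
print(b_with_factors.shape == b_plain.shape)  # True: both are (256, 256, 3)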
infilling_python
Image_Filtering
118
120
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]']
['for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)']
['', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 118}, {'reason_category': 'Loop Body', 'usage_line': 119}, {'reason_category': 'Loop Body', 'usage_line': 120}]
Library 'cv2' used at line 119 is imported at line 2 and has a Long-Range dependency. Variable 'G' used at line 119 is defined at line 116 and has a Short-Range dependency. Variable 'gpA' used at line 120 is defined at line 117 and has a Short-Range dependency. Variable 'G' used at line 120 is defined at line 119 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
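The infilled loop builds the Gaussian pyramid for A: each cv2.pyrDown call blurs with a 5x5 Gaussian kernel and halves both dimensions. A self-contained sketch of the same pattern, with a random image standing in for the resized apple (an assumption for illustration):

import cv2
import numpy as np

A = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)  # stand-in for the 256x256 input

G = A.copy()
gpA = [G]
for i in range(6):
    G = cv2.pyrDown(G)  # blur + downsample by 2
    gpA.append(G)

print([level.shape[0] for level in gpA])  # [256, 128, 64, 32, 16, 8, 4]

The pyramid ends up with seven levels (indices 0 through 6) for a 256x256 input.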
infilling_python
Image_Filtering
123
127
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B']
['G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)']
['', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 125}, {'reason_category': 'Loop Body', 'usage_line': 126}, {'reason_category': 'Loop Body', 'usage_line': 127}]
Variable 'B' used at line 123 is defined at line 114 and has a Short-Range dependency. Variable 'G' used at line 124 is defined at line 123 and has a Short-Range dependency. Library 'cv2' used at line 126 is imported at line 2 and has a Long-Range dependency. Variable 'G' used at line 126 is defined at line 123 and has a Short-Range dependency. Variable 'gpB' used at line 127 is defined at line 124 and has a Short-Range dependency. Variable 'G' used at line 127 is defined at line 126 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 2}
{'Variable Short-Range': 5, 'Library Long-Range': 1}
infilling_python
Image_Filtering
132
134
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):']
[' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)']
['', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 132}, {'reason_category': 'Loop Body', 'usage_line': 133}, {'reason_category': 'Loop Body', 'usage_line': 134}]
Library 'cv2' used at line 132 is imported at line 2 and has a Long-Range dependency. Variable 'gpA' used at line 132 is defined at line 117 and has a Medium-Range dependency. Variable 'i' used at line 132 is part of a Loop defined at line 131 and has a Short-Range dependency. Library 'cv2' used at line 133 is imported at line 2 and has a Long-Range dependency. Variable 'gpA' used at line 133 is defined at line 117 and has a Medium-Range dependency. Variable 'i' used at line 133 is part of a Loop defined at line 131 and has a Short-Range dependency. Variable 'GE' used at line 133 is defined at line 132 and has a Short-Range dependency. Variable 'lpA' used at line 134 is defined at line 130 and has a Short-Range dependency. Variable 'L' used at line 134 is defined at line 133 and has a Short-Range dependency.
{'Loop Body': 3}
{'Library Long-Range': 2, 'Variable Medium-Range': 2, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
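The infilled loop body constructs one Laplacian level per iteration: cv2.pyrUp upsamples the coarser Gaussian level, and cv2.subtract takes the saturating difference against the finer level, leaving the band-pass detail at that scale. A sketch of the construction as a helper, slightly generalized to start from the coarsest level gp[-1], whereas the record starts from gpA[5] (gpA holds seven levels but only gpA[0] through gpA[5] are consumed, so the coarsest level appears unused):

import cv2

def laplacian_pyramid(gp):
    # gp is a Gaussian pyramid, finest level first.
    lp = [gp[-1]]  # the coarsest Gaussian level seeds the stack
    for i in range(len(gp) - 1, 0, -1):
        GE = cv2.pyrUp(gp[i])            # upsample the coarser level by 2
        L = cv2.subtract(gp[i - 1], GE)  # band-pass residual at the finer level
        lp.append(L)
    return lp

For the power-of-two sizes used here the doubled dimensions line up exactly; for odd-sized images cv2.pyrUp would need an explicit dstsize argument before cv2.subtract could be applied.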
infilling_python
Image_Filtering
138
140
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):']
[' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)']
['', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 138}, {'reason_category': 'Loop Body', 'usage_line': 139}, {'reason_category': 'Loop Body', 'usage_line': 140}]
Library 'cv2' used at line 138 is imported at line 2 and has a Long-Range dependency. Variable 'gpB' used at line 138 is defined at line 124 and has a Medium-Range dependency. Variable 'i' used at line 138 is part of a Loop defined at line 137 and has a Short-Range dependency. Library 'cv2' used at line 139 is imported at line 2 and has a Long-Range dependency. Variable 'gpB' used at line 139 is defined at line 124 and has a Medium-Range dependency. Variable 'i' used at line 139 is part of a Loop defined at line 137 and has a Short-Range dependency. Variable 'GE' used at line 139 is defined at line 138 and has a Short-Range dependency. Variable 'lpB' used at line 140 is defined at line 136 and has a Short-Range dependency. Variable 'L' used at line 140 is defined at line 139 and has a Short-Range dependency.
{'Loop Body': 3}
{'Library Long-Range': 2, 'Variable Medium-Range': 2, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
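The span this record infills (lines 138-140) is the loop body that extends the Laplacian pyramid for image B, mirroring the loop already shown for A. Below is a minimal runnable sketch of that pyramid pattern; the 256x256 synthetic test image and the six-level depth are assumptions, standing in for the jpeg assets the record reads from disk.

import numpy as np
import cv2

# Synthetic test image (assumption: replaces the apple/orange jpegs).
img = np.full((256, 256, 3), 128, dtype=np.uint8)
cv2.circle(img, (128, 128), 60, (0, 0, 255), -1)

# Gaussian pyramid: repeatedly blur and downsample.
gp = [img.copy()]
for _ in range(6):
    gp.append(cv2.pyrDown(gp[-1]))

# Laplacian pyramid: difference between each Gaussian level and the
# upsampled next-coarser level (the pattern of lines 137-140 above).
lp = [gp[5]]
for i in range(5, 0, -1):
    up = cv2.pyrUp(gp[i])
    lp.append(cv2.subtract(gp[i - 1], up))

print([level.shape for level in lp])  # (8, 8, 3) up to (256, 256, 3)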
infilling_python
Image_Filtering
144
147
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []']
['for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)']
['', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 144}, {'reason_category': 'Loop Body', 'usage_line': 145}, {'reason_category': 'Loop Body', 'usage_line': 146}, {'reason_category': 'Loop Body', 'usage_line': 147}]
Variable 'lpA' used at line 144 is defined at line 130 and has a Medium-Range dependency. Variable 'lpB' used at line 144 is defined at line 136 and has a Short-Range dependency. Variable 'la' used at line 145 is part of a Loop defined at line 144 and has a Short-Range dependency. Library 'np' used at line 146 is imported at line 1 and has a Long-Range dependency. Variable 'la' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'cols' used at line 146 is defined at line 145 and has a Short-Range dependency. Variable 'lb' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'LS' used at line 147 is defined at line 143 and has a Short-Range dependency. Variable 'ls' used at line 147 is defined at line 146 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 3}
{'Variable Medium-Range': 1, 'Variable Short-Range': 4, 'Variable Loop Short-Range': 3, 'Library Long-Range': 1}
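The infilled span here (lines 144-147) walks both Laplacian pyramids in lockstep and stacks the left half of each A level against the right half of the matching B level. A self-contained sketch of that per-level stacking follows; the constant black and white "levels" are assumptions so it runs without building the pyramids first.

import numpy as np

# Constant stand-ins for one matching pair of pyramid levels (assumption).
level_a = np.zeros((64, 64, 3), dtype=np.uint8)       # stand-in lpA level: all black
level_b = np.full((64, 64, 3), 255, dtype=np.uint8)   # stand-in lpB level: all white

rows, cols, depth = level_a.shape
# Keep the left half of A's level and the right half of B's level.
combined = np.hstack((level_a[:, :cols // 2], level_b[:, cols // 2:]))

print(combined.shape)                   # (64, 64, 3)
print(combined[0, 0], combined[0, -1])  # [0 0 0] on the left, [255 255 255] on the right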
infilling_python
Image_Filtering
145
147
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):']
[' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)']
['', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 145}, {'reason_category': 'Loop Body', 'usage_line': 146}, {'reason_category': 'Loop Body', 'usage_line': 147}]
Variable 'la' used at line 145 is part of a Loop defined at line 144 and has a Short-Range dependency. Library 'np' used at line 146 is imported at line 1 and has a Long-Range dependency. Variable 'la' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'cols' used at line 146 is defined at line 145 and has a Short-Range dependency. Variable 'lb' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'LS' used at line 147 is defined at line 143 and has a Short-Range dependency. Variable 'ls' used at line 147 is defined at line 146 and has a Short-Range dependency.
{'Loop Body': 3}
{'Variable Loop Short-Range': 3, 'Library Long-Range': 1, 'Variable Short-Range': 3}
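Records 2-4 all target the hard-cut half-and-half mix; the trailing context in each record then replaces that cut with a mask-weighted mix, la * m + lb * (1 - m), for the diagonal blend. Here is a sketch of that masked mix, shown on Gaussian levels for brevity (the context applies the same per-level formula to Laplacian levels); the vertical half mask and three-level depth are assumptions.

import numpy as np
import cv2

a = np.zeros((64, 64, 3), dtype=np.float32)        # stand-in image A
b = np.full((64, 64, 3), 255.0, dtype=np.float32)  # stand-in image B

mask = np.zeros((64, 64, 3), dtype=np.float32)
mask[:, :32] = 1.0                                 # 1 where A should dominate

# Downsample images and mask together so every level has a matching mask.
gp_a, gp_b, gp_m = [a], [b], [mask]
for _ in range(3):
    gp_a.append(cv2.pyrDown(gp_a[-1]))
    gp_b.append(cv2.pyrDown(gp_b[-1]))
    gp_m.append(cv2.pyrDown(gp_m[-1]))

# Per-level weighted mix: la * m + lb * (1 - m).
blended = [la * m + lb * (1.0 - m) for la, lb, m in zip(gp_a, gp_b, gp_m)]
print([level.shape for level in blended])  # (64, 64, 3) down to (8, 8, 3)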
infilling_python
Image_Filtering
146
147
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape']
[' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)']
['', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 146}, {'reason_category': 'Loop Body', 'usage_line': 147}]
Library 'np' used at line 146 is imported at line 1 and has a Long-Range dependency. Variable 'la' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'cols' used at line 146 is defined at line 145 and has a Short-Range dependency. Variable 'lb' used at line 146 is part of a Loop defined at line 144 and has a Short-Range dependency. Variable 'LS' used at line 147 is defined at line 143 and has a Short-Range dependency. Variable 'ls' used at line 147 is defined at line 146 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 3}
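Beyond the pyramid loops, each record's trailing context (Task 3) band-pass filters a mean green-channel trace and reads the pulse rate off the FFT peak. A self-contained sketch of that pipeline on a synthetic signal follows; the 1.2 Hz tone, the noise level, and the 20 s duration are assumptions so it runs without ./alice.avi.

import numpy as np
from scipy.signal import butter, filtfilt

fs = 30.0                             # sampling rate, frames per second
t = np.arange(0, 20, 1 / fs)          # 20 s of samples (assumption)
rng = np.random.default_rng(0)
signal = np.sin(2 * np.pi * 1.2 * t) + 0.5 * rng.standard_normal(t.size)

# Order-1 Butterworth band-pass between 0.8 and 3 Hz, as in bandpass_filter.
nyquist = 0.5 * fs
b, a = butter(1, [0.8 / nyquist, 3.0 / nyquist], btype='band')
filtered = filtfilt(b, a, signal)

# Dominant positive frequency, converted to beats per minute.
spectrum = np.abs(np.fft.fft(filtered))
freqs = np.fft.fftfreq(filtered.size, d=1 / fs)
peak = np.argmax(spectrum[:filtered.size // 2])  # positive half only
print(f"estimated rate: {60 * freqs[peak]:.1f} bpm")  # ~72 bpm for the 1.2 Hz tone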
infilling_python
Image_Filtering
152
153
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):']
[' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])']
['', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 152}, {'reason_category': 'Loop Body', 'usage_line': 153}]
Library 'cv2' used at line 152 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 152 is defined at line 150 and has a Short-Range dependency. Library 'cv2' used at line 153 is imported at line 2 and has a Long-Range dependency. Variable 'LS' used at line 153 is defined at line 143 and has a Short-Range dependency. Variable 'i' used at line 153 is part of a Loop defined at line 151 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 2, 'Variable Short-Range': 2, 'Variable Loop Short-Range': 1}
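This record's span (lines 152-153) is the body of the collapse loop: upsample the running result and add the detail stored at the next-finer Laplacian level. The runnable sketch below reuses the synthetic-pyramid setup from the first sketch; the test image is again an assumption.

import numpy as np
import cv2

# Same synthetic pyramid setup as the first sketch (assumption).
img = np.full((256, 256, 3), 128, dtype=np.uint8)
cv2.circle(img, (128, 128), 60, (0, 0, 255), -1)

gp = [img.copy()]
for _ in range(6):
    gp.append(cv2.pyrDown(gp[-1]))
lp = [gp[5]]
for i in range(5, 0, -1):
    lp.append(cv2.subtract(gp[i - 1], cv2.pyrUp(gp[i])))

# Collapse: start from the coarsest level, then upsample and add the
# detail at each finer level (the pattern of lines 152-153 above).
recon = lp[0]
for i in range(1, 6):
    recon = cv2.add(cv2.pyrUp(recon), lp[i])

# Without per-level edits the collapse only approximates the input,
# because cv2.subtract and cv2.add saturate at 0 and 255 on uint8.
print(recon.shape, int(np.abs(recon.astype(int) - img.astype(int)).max()))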
infilling_python
Image_Filtering
156
156
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half']
['real = np.hstack((A[:,:cols//2],B[:,cols//2:]))']
['', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 156 is imported at line 1 and has a Long-Range dependency. Variable 'A' used at line 156 is defined at line 113 and has a Long-Range dependency. Variable 'B' used at line 156 is defined at line 114 and has a Long-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Long-Range': 2}
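The ground-truth infill recorded above (line 156, real = np.hstack((A[:,:cols//2],B[:,cols//2:]))) builds the naive half-and-half composite that the Laplacian-pyramid blend is compared against. A minimal sketch of that operation follows, with synthetic 256x256 arrays standing in for the apple/orange JPEGs (an assumption, so the snippet runs without any files on disk):

import numpy as np

rows, cols = 256, 256
A = np.full((rows, cols, 3), 255, dtype=np.uint8)  # stand-in for the resized apple image
B = np.zeros((rows, cols, 3), dtype=np.uint8)      # stand-in for the resized orange image

# Direct seam at the vertical midline: left half of A, right half of B.
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))

assert real.shape == (rows, cols, 3)
assert (real[:, :cols // 2] == 255).all()
assert (real[:, cols // 2:] == 0).all()

Slicing at cols//2 keeps the split integral even for odd widths; the resulting hard seam is exactly the artifact the pyramid blend is meant to hide.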
infilling_python
Image_Filtering
158
158
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '']
['blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)']
['original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 158 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 158 is defined at line 150 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
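This record's target (line 158) converts the pyramid-blended result from OpenCV's BGR channel order to RGB before display. A self-contained sketch, with a random array standing in for the reconstructed blend ls_ (an assumption, so nothing needs to be read or reconstructed first):

import numpy as np
import cv2

# Random uint8 image stands in for the pyramid-reconstructed blend ls_.
ls_ = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)

# OpenCV stores channels as BGR, while matplotlib's imshow expects RGB,
# so the blend is converted once before plotting.
blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)

# For a three-channel image the conversion is a pure channel reversal.
assert (blended_rgb == ls_[:, :, ::-1]).all()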
infilling_python
Image_Filtering
159
159
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)']
['original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)']
['', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 159 is imported at line 2 and has a Long-Range dependency. Variable 'real' used at line 159 is defined at line 156 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
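The infill here (line 159) repeats the previous record's cvtColor call, this time on the directly spliced image, so a sketch of the pyramid machinery surrounding both infills is more informative: building a Laplacian pyramid with pyrDown/pyrUp and collapsing it back. The synthetic gradient input and the 5-level depth are illustrative assumptions (the recorded code runs 6 pyrDown steps on loaded photos):

import numpy as np
import cv2

# Synthetic 256x256 three-channel gradient stands in for an input photo.
gray = np.tile(np.arange(256, dtype=np.uint8), (256, 1))
img = cv2.merge([gray, gray, gray])

# Gaussian pyramid: repeated blur + 2x downsample.
gp = [img]
for _ in range(5):
    gp.append(cv2.pyrDown(gp[-1]))

# Laplacian pyramid: per-level detail lost between Gaussian levels,
# stored coarsest-first to match the reconstruction loop below.
lp = [gp[-1]]
for i in range(len(gp) - 1, 0, -1):
    lp.append(cv2.subtract(gp[i - 1], cv2.pyrUp(gp[i])))

# Collapse: upsample and add the details back, coarsest to finest.
rec = lp[0]
for level in lp[1:]:
    rec = cv2.add(cv2.pyrUp(rec), level)

print(img.shape, rec.shape)  # both (256, 256, 3); rec approximates img up to rounding

Power-of-two dimensions keep every pyrUp output exactly matched to the next level's size, which is why the recorded code resizes its inputs to 256x256 first.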
infilling_python
Image_Filtering
169
173
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):']
[' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask']
['', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 
'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 171}, {'reason_category': 'Loop Body', 'usage_line': 172}]
Library 'np' used at line 169 is imported at line 1 and has a Long-Range dependency. Variable 'shape' used at line 169 is defined at line 168 and has a Short-Range dependency. Variable 'mask' used at line 170 is defined at line 169 and has a Short-Range dependency. Variable 'height' used at line 171 is defined at line 170 and has a Short-Range dependency. Variable 'width' used at line 171 is defined at line 170 and has a Short-Range dependency. Variable 'mask' used at line 172 is defined at line 169 and has a Short-Range dependency. Variable 'i' used at line 172 is part of a Loop defined at line 171 and has a Short-Range dependency. Variable 'strip_width' used at line 172 is defined at line 168 and has a Short-Range dependency. Variable 'width' used at line 172 is defined at line 170 and has a Short-Range dependency. Variable 'mask' used at line 173 is defined at line 169 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 8, 'Variable Loop Short-Range': 1}
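Lines 169-173, this record's infill target, are the body of create_diagonal_mask: a float32 mask that is 1.0 in a band of roughly strip_width pixels centred on the main diagonal and 0.0 elsewhere. A standalone version with the clamping spelled out, exercised with an assumed strip_width of 64 rather than the recorded default of 200:

import numpy as np

def create_diagonal_mask(shape, strip_width=200):
    mask = np.zeros(shape, dtype=np.float32)
    height, width, _ = mask.shape
    for i in range(min(height, width)):
        # On row i the band is centred at column i; clamp it to the image.
        lo = max(0, i - strip_width // 2)
        hi = min(width, i + strip_width // 2)
        mask[i, lo:hi, :] = 1.0
    return mask

mask = create_diagonal_mask((256, 256, 3), strip_width=64)
print(mask[0, :40, 0])        # row 0: band covers columns 0..31
print(mask[200, 160:240, 0])  # row 200: band centred near column 200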
infilling_python
Image_Filtering
171
173
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape']
[' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask']
['', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 
'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Define Stop Criteria', 'usage_line': 171}, {'reason_category': 'Loop Body', 'usage_line': 172}]
Variable 'height' used at line 171 is defined at line 170 and has a Short-Range dependency. Variable 'width' used at line 171 is defined at line 170 and has a Short-Range dependency. Variable 'mask' used at line 172 is defined at line 169 and has a Short-Range dependency. Variable 'i' used at line 172 is part of a Loop defined at line 171 and has a Short-Range dependency. Variable 'strip_width' used at line 172 is defined at line 168 and has a Short-Range dependency. Variable 'width' used at line 172 is defined at line 170 and has a Short-Range dependency. Variable 'mask' used at line 173 is defined at line 169 and has a Short-Range dependency.
{'Define Stop Criteria': 1, 'Loop Body': 1}
{'Variable Short-Range': 6, 'Variable Loop Short-Range': 1}
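This record re-targets the same mask loop (lines 171-173), so the sketch below shows the step that mask feeds instead: the per-level alpha blend ls = lpA[i]*gpmask[i] + lpB[i]*(1 - gpmask[i]). Random float arrays stand in for matching pyramid levels, and a hard vertical mask replaces the downsampled diagonal strip for clarity (both assumptions):

import numpy as np

# Stand-ins for corresponding Laplacian-pyramid levels of images A and B.
la = np.random.rand(64, 64, 3).astype(np.float32)
lb = np.random.rand(64, 64, 3).astype(np.float32)

# Hard vertical mask: take la on the left half, lb on the right half.
m = np.zeros((64, 64, 3), dtype=np.float32)
m[:, :32, :] = 1.0

# Per-pixel alpha blend; soft mask edges would feather the seam.
ls = la * m + lb * (1.0 - m)

assert np.allclose(ls[:, :32], la[:, :32])
assert np.allclose(ls[:, 32:], lb[:, 32:])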
infilling_python
Image_Filtering
177
177
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []']
['mask = create_diagonal_mask(A.shape)']
['M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where 
the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Function 'create_diagonal_mask' used at line 177 is defined at line 168 and has a Short-Range dependency. Variable 'A' used at line 177 is defined at line 113 and has a Long-Range dependency.
{}
{'Function Short-Range': 1, 'Variable Long-Range': 1}
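The gap in this row is the single call mask = create_diagonal_mask(A.shape): a short-range use of the helper defined just above it plus a long-range use of the resized image A, exactly as the dependency annotation records. A minimal, self-contained sketch of that fill, with the helper copied from the row's 'before' context and a stand-in zero array replacing the loaded apple image (the stand-in is an assumption, not part of the row):

import numpy as np

def create_diagonal_mask(shape, strip_width=200):
    # Verbatim from the row's context: 1.0 inside a diagonal strip,
    # 0.0 everywhere else, as a float32 blend weight per pixel and channel.
    mask = np.zeros(shape, dtype=np.float32)
    height, width, _ = mask.shape
    for i in range(min(height, width)):
        mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0
    return mask

A = np.zeros((256, 256, 3), dtype=np.uint8)  # stand-in for the resized apple image
mask = create_diagonal_mask(A.shape)         # the ground-truth gap line
print(mask.shape, mask.min(), mask.max())    # (256, 256, 3) 0.0 1.0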
infilling_python
Image_Filtering
181
182
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):']
[' M = cv2.pyrDown(M)', ' gpmask.append(M)']
['gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. 
DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 181}, {'reason_category': 'Loop Body', 'usage_line': 182}]
Library 'cv2' used at line 181 is imported at line 2 and has a Long-Range dependency. Variable 'M' used at line 181 is defined at line 178 and has a Short-Range dependency. Variable 'gpmask' used at line 182 is defined at line 179 and has a Short-Range dependency. Variable 'M' used at line 182 is defined at line 181 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
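Here the gap is the body of the mask-pyramid loop: each iteration halves M with cv2.pyrDown and appends it to gpmask, mirroring how the Gaussian pyramids for A and B were built earlier in the context. A runnable sketch under the assumption of a uniform stand-in mask (the real one is the diagonal mask filled in by the previous row):

import numpy as np
import cv2

M = np.ones((256, 256, 3), dtype=np.float32)  # stand-in for create_diagonal_mask(A.shape)
gpmask = [M]
for i in range(5):
    M = cv2.pyrDown(M)   # ground-truth gap: blur + downsample by 2
    gpmask.append(M)     # ground-truth gap: collect the level
print([m.shape[0] for m in gpmask])  # [256, 128, 64, 32, 16, 8]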
infilling_python
Image_Filtering
185
187
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):']
[' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)']
['', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. 
For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 185}, {'reason_category': 'Loop Body', 'usage_line': 186}, {'reason_category': 'Loop Body', 'usage_line': 187}]
Variable 'lpA' used at line 185 is defined at line 130 and has a Long-Range dependency. Variable 'i' used at line 185 is part of a Loop defined at line 184 and has a Short-Range dependency. Variable 'lpA' used at line 186 is defined at line 130 and has a Long-Range dependency. Variable 'i' used at line 186 is part of a Loop defined at line 184 and has a Short-Range dependency. Variable 'gpmask' used at line 186 is defined at line 183 and has a Short-Range dependency. Variable 'lpB' used at line 186 is defined at line 136 and has a Long-Range dependency. Variable 'LS' used at line 187 is defined at line 176 and has a Medium-Range dependency. Variable 'ls' used at line 187 is defined at line 186 and has a Short-Range dependency.
{'Loop Body': 3}
{'Variable Long-Range': 3, 'Variable Loop Short-Range': 2, 'Variable Short-Range': 2, 'Variable Medium-Range': 1}
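This row's gap performs the per-level blend: at each pyramid level the two Laplacian images are mixed as a convex combination weighted by the corresponding mask level, and the result is collected into LS. A toy single-level sketch with stand-in arrays (the real lpA, lpB, and gpmask come from the surrounding context; the constants here are assumptions):

import numpy as np

la = np.full((8, 8, 3), 100, dtype=np.float32)  # stand-in Laplacian level of A
lb = np.full((8, 8, 3), 200, dtype=np.float32)  # stand-in Laplacian level of B
m = np.zeros((8, 8, 3), dtype=np.float32)
m[:, :4, :] = 1.0                               # 1.0 where A should show through

LS = []
rows, cols, dpt = la.shape                      # ground-truth gap keeps the level's shape
ls = la * m + lb * (1 - m)                      # ground-truth gap: convex per-pixel blend
LS.append(ls)
print(ls[0, 0, 0], ls[0, 7, 0])                 # 100.0 (A side) 200.0 (B side)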
infilling_python
Image_Filtering
192
194
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):']
[' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])']
['', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 192}, {'reason_category': 'Loop Body', 'usage_line': 193}, {'reason_category': 'Loop Body', 'usage_line': 194}]
Library 'cv2' used at line 192 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 192 is defined at line 190 and has a Short-Range dependency. Library 'cv2' used at line 193 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 193 is defined at line 192 and has a Short-Range dependency. Variable 'LS' used at line 193 is defined at line 176 and has a Medium-Range dependency. Variable 'i' used at line 193 is part of a Loop defined at line 191 and has a Short-Range dependency. Library 'cv2' used at line 194 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'LS' used at line 194 is defined at line 176 and has a Medium-Range dependency. Variable 'i' used at line 194 is part of a Loop defined at line 191 and has a Short-Range dependency.
{'Loop Body': 3}
{'Library Long-Range': 3, 'Variable Short-Range': 3, 'Variable Medium-Range': 2, 'Variable Loop Short-Range': 2}
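The gap here is the reconstruction step: walk the blended pyramid from coarse to fine, upsampling with cv2.pyrUp, resizing to guard against off-by-one level sizes, and adding back each detail level with cv2.add. A self-contained sketch with a stand-in zero pyramid (the 256x256 shapes match the row's images but are an assumption here):

import numpy as np
import cv2

# Coarsest level first, matching LS's order after gpmask.reverse() in the context.
LS = [np.zeros((8 * 2**i, 8 * 2**i, 3), dtype=np.float32) for i in range(6)]

ls_ = LS[0]
for i in range(1, 6):
    ls_ = cv2.pyrUp(ls_)                                     # upsample by 2
    ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0]))  # match the level exactly
    ls_ = cv2.add(ls_, LS[i])                                # restore that level's detail
print(ls_.shape)  # (256, 256, 3)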
infilling_python
Image_Filtering
197
197
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half']
['real = np.hstack((A[:, :cols//2], B[:, cols//2:]))']
['ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 197 is imported at line 1 and has a Long-Range dependency. Variable 'A' used at line 197 is defined at line 113 and has a Long-Range dependency. Variable 'B' used at line 197 is defined at line 114 and has a Long-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Long-Range': 2}
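For comparison with the pyramid blend, this row's gap builds the naive composite: a hard left/right concatenation of the two halves with np.hstack, so the seam stays visible. A sketch with stand-in images (the constant arrays are assumptions standing in for the resized apple and orange):

import numpy as np

A = np.zeros((256, 256, 3), dtype=np.uint8)      # stand-in left image
B = np.full((256, 256, 3), 255, dtype=np.uint8)  # stand-in right image
cols = A.shape[1]                                # in the row, cols survives from the blend loop

real = np.hstack((A[:, :cols//2], B[:, cols//2:]))  # ground-truth gap: hard seam
print(real.shape)  # (256, 256, 3)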
infilling_python
Image_Filtering
198
198
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))']
['ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)']
['real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 198 is imported at line 2 and has a Long-Range dependency. Variable 'ls_' used at line 198 is defined at line 190 and has a Short-Range dependency. Library 'np' used at line 198 is imported at line 1 and has a Long-Range dependency.
{}
{'Library Long-Range': 2, 'Variable Short-Range': 1}
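Note: to make the dependency annotations above concrete, here is a minimal runnable sketch of the span this record targets (line 198). The random `ls_` array is an assumption standing in for the Laplacian-pyramid reconstruction; only the final conversion line is the record's ground truth.

import numpy as np   # 'np' at line 198: Library Long-Range (imported at line 1)
import cv2           # 'cv2' at line 198: Library Long-Range (imported at line 2)

# Stand-in for the pyramid blend 'ls_' defined at line 190 (Variable Short-Range);
# a random float image is assumed here purely for illustration.
ls_ = np.random.rand(64, 64, 3) * 255

# Ground-truth infill (line 198): cast to uint8, then convert BGR -> RGB.
ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)
print(ls_rgb.shape)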
infilling_python
Image_Filtering
199
199
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)']
['real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)']
['', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 199 is imported at line 2 and has a Long-Range dependency. Variable 'real' used at line 199 is defined at line 197 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
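Note: the same pattern, sketched for this record's target (line 199). The flat dummy images and the derived `cols` are assumptions for illustration; `real` mirrors the hard-seam composite defined at line 197.

import numpy as np
import cv2  # 'cv2' at line 199: Library Long-Range (imported at line 2)

# Dummy stand-ins for the resized apple/orange images (assumption for illustration).
A = np.zeros((256, 256, 3), dtype=np.uint8)
B = np.full((256, 256, 3), 255, dtype=np.uint8)
cols = A.shape[1]

# 'real' defined at line 197 (Variable Short-Range): direct left/right concatenation.
real = np.hstack((A[:, :cols // 2], B[:, cols // 2:]))

# Ground-truth infill (line 199): convert the seam composite BGR -> RGB.
real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)
print(real_rgb.shape)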
infilling_python
Image_Filtering
211
211
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):']
[' cap = cv2.VideoCapture(filename)']
[' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'cv2' used at line 211 is imported at line 2 and has a Long-Range dependency. Variable 'filename' used at line 211 is defined at line 210 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
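Note: a sketch of this record's target (line 211) in isolation. The './alice.avi' path comes from the record itself; whether the file exists is an assumption, and cv2.VideoCapture simply reports not-opened if it does not.

import cv2  # 'cv2' at line 211: Library Long-Range (imported at line 2)

def read_video_into_numpy(filename):
    # Ground-truth infill (line 211): 'filename' is bound by the def on line 210,
    # one line above, hence a Variable Short-Range dependency.
    cap = cv2.VideoCapture(filename)
    opened = cap.isOpened()
    cap.release()
    return opened

print(read_video_into_numpy('./alice.avi'))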
infilling_python
Image_Filtering
220
221
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). Exiting ...")', ' break', '']
[' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)']
['', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 220}, {'reason_category': 'Loop Body', 'usage_line': 221}]
Library 'cv2' used at line 220 is imported at line 2 and has a Long-Range dependency. Variable 'frame' used at line 220 is defined at line 214 and has a Short-Range dependency. Variable 'frames' used at line 221 is defined at line 212 and has a Short-Range dependency. Variable 'frame' used at line 221 is defined at line 220 and has a Short-Range dependency.
{'Loop Body': 2}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
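Note: both target lines (220-221) carry the 'Loop Body' reason category because they sit inside the frame-reading while loop. The sketch below exercises just those two lines outside the loop; the zero frame standing in for cap.read() output is an assumption.

import numpy as np
import cv2

frames = []                                  # 'frames' defined at line 212 (Short-Range)
frame = np.zeros((4, 4, 3), dtype=np.uint8)  # stand-in for the frame read at line 214

# Ground-truth infill, normally inside the while loop ('Loop Body'):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # line 220: cv2 Long-Range, frame Short-Range
frames.append(frame)                            # line 221: frames and frame both Short-Range
print(len(frames))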
infilling_python
Image_Filtering
231
236
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):']
[' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal']
['', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'fs' used at line 231 is defined at line 230 and has a Short-Range dependency.
Variable 'low_cutoff' used at line 232 is defined at line 230 and has a Short-Range dependency.
Variable 'nyquist' used at line 232 is defined at line 231 and has a Short-Range dependency.
Variable 'high_cutoff' used at line 233 is defined at line 230 and has a Short-Range dependency.
Variable 'nyquist' used at line 233 is defined at line 231 and has a Short-Range dependency.
Library 'butter' used at line 234 is imported at line 5 and has a Long-Range dependency.
Variable 'order' used at line 234 is defined at line 230 and has a Short-Range dependency.
Variable 'low' used at line 234 is defined at line 232 and has a Short-Range dependency.
Variable 'high' used at line 234 is defined at line 233 and has a Short-Range dependency.
Library 'filtfilt' used at line 235 is imported at line 5 and has a Long-Range dependency.
Variable 'b' used at line 235 is defined at line 234 and has a Short-Range dependency.
Variable 'a' used at line 235 is defined at line 234 and has a Short-Range dependency.
Variable 'signal' used at line 235 is defined at line 230 and has a Short-Range dependency.
Variable 'filtered_signal' used at line 236 is defined at line 235 and has a Short-Range dependency.
{}
{'Variable Short-Range': 12, 'Library Long-Range': 2}
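Note: this record asks for the entire bandpass_filter body (lines 231-236), so every dependency listed above is local except butter/filtfilt. Below the function is reassembled from the record and driven by a synthetic 2 Hz sine at fs = 30 Hz; the test signal is an assumption standing in for the averaged green-channel trace.

import numpy as np
from scipy.signal import butter, filtfilt  # Library Long-Range (imported at line 5)

def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):
    nyquist = 0.5 * fs                                 # line 231: 'fs' from line 230
    low = low_cutoff / nyquist                         # line 232: normalized low edge
    high = high_cutoff / nyquist                       # line 233: normalized high edge
    b, a = butter(order, [low, high], btype='band')    # line 234: band-pass design
    filtered_signal = filtfilt(b, a, signal)           # line 235: zero-phase filtering
    return filtered_signal                             # line 236

# Synthetic heartbeat-like tone (2 Hz, roughly 120 bpm) sampled at 30 Hz -- an assumption.
t = np.arange(0, 10, 1 / 30)
demo = np.sin(2 * np.pi * 2.0 * t) + 0.1 * np.random.randn(t.size)
print(bandpass_filter(demo, 0.8, 3, 30, 1)[:5])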
infilling_python
Image_Filtering
233
233
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist']
[' high = high_cutoff / nyquist']
[" b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'high_cutoff' used at line 233 is defined at line 230 and has a Short-Range dependency. Variable 'nyquist' used at line 233 is defined at line 231 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
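Editorial note: a minimal, self-contained sketch of what the record's infilled line does, namely normalizing a cutoff frequency by the Nyquist rate before filter design. The concrete values below are assumptions for illustration, not taken from the record.

# Cutoffs must be expressed as a fraction of the Nyquist frequency for scipy filter design.
fs = 30.0                            # assumed sampling rate in Hz
nyquist = 0.5 * fs                   # Nyquist frequency is half the sampling rate
low_cutoff, high_cutoff = 0.8, 3.0   # assumed band edges in Hz
low = low_cutoff / nyquist           # normalized lower cutoff, now in (0, 1)
high = high_cutoff / nyquist         # the infilled line: normalized upper cutoff
print(low, high)                     # 0.0533..., 0.2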
infilling_python
Image_Filtering
234
234
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist']
[" b, a = butter(order, [low, high], btype='band')"]
[' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'butter' used at line 234 is imported at line 5 and has a Long-Range dependency. Variable 'order' used at line 234 is defined at line 230 and has a Short-Range dependency. Variable 'low' used at line 234 is defined at line 232 and has a Short-Range dependency. Variable 'high' used at line 234 is defined at line 233 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 3}
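Editorial note: a minimal sketch of the infilled butter call, assuming the same first-order band and a 30 Hz sampling rate as the surrounding code. scipy.signal.butter expects cutoffs already normalized to (0, 1) and, by default, returns the transfer-function coefficients b and a.

from scipy.signal import butter     # the Long-Range import the annotation points to

order = 1                                        # assumed, matching the dataset code
low, high = 0.8 / 15.0, 3.0 / 15.0               # cutoffs normalized by Nyquist = 15 Hz
b, a = butter(order, [low, high], btype='band')  # the infilled line
print(b, a)                                      # numerator / denominator coefficients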
infilling_python
Image_Filtering
235
236
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')"]
[' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal']
['', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'filtfilt' used at line 235 is imported at line 5 and has a Long-Range dependency. Variable 'b' used at line 235 is defined at line 234 and has a Short-Range dependency. Variable 'a' used at line 235 is defined at line 234 and has a Short-Range dependency. Variable 'signal' used at line 235 is defined at line 230 and has a Short-Range dependency. Variable 'filtered_signal' used at line 236 is defined at line 235 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 4}
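Editorial note: a runnable sketch of the record's two infilled lines, using a synthetic signal in place of the video data. filtfilt applies the Butterworth band-pass forward and then backward, so the output has no phase lag, which matters when peak positions are later read off the signal.

import numpy as np
from scipy.signal import butter, filtfilt

fs = 30.0                                        # assumed sampling rate in Hz
t = np.arange(0, 10, 1 / fs)
signal = np.sin(2 * np.pi * 1.5 * t) + 0.5 * t   # assumed stand-in: 1.5 Hz tone plus drift
b, a = butter(1, [0.8 / (0.5 * fs), 3.0 / (0.5 * fs)], btype='band')
filtered_signal = filtfilt(b, a, signal)         # zero-phase filtering (the infilled lines)
print(filtered_signal[:5])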
infilling_python
Image_Filtering
245
245
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]']
['cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]']
['forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'first_frame' used at line 245 is defined at line 240 and has a Short-Range dependency. Variable 'cheek_rect' used at line 245 is defined at line 243 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
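Editorial note: a sketch of the infilled ROI crop on a synthetic frame (the real frame size is not in the record). The rectangle stores (x, y) corners, so the y-coordinates index NumPy rows first and the x-coordinates index columns second.

import numpy as np

first_frame = np.zeros((480, 640, 3), dtype=np.uint8)      # assumed H x W x C frame
cheek_rect = [(220, 250), (320, 350)]                       # [(x1, y1), (x2, y2)]
cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1],  # rows:    y1:y2
                        cheek_rect[0][0]:cheek_rect[1][0]]  # columns: x1:x2
print(cheek_roi.shape)                                      # (100, 100, 3)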
infilling_python
Image_Filtering
246
246
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]']
['forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]']
['', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'first_frame' used at line 246 is defined at line 240 and has a Short-Range dependency. Variable 'forehead_rect' used at line 246 is defined at line 244 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
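Editorial note: the forehead crop in this record follows the same y-then-x slicing pattern. Since NumPy clips out-of-range slices silently, checking the resulting shape is a cheap sanity test; the frame size below is again an assumption.

import numpy as np

first_frame = np.zeros((480, 640, 3), dtype=np.uint8)       # assumed frame size
forehead_rect = [(220, 10), (500, 174)]                     # [(x1, y1), (x2, y2)]
forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1],
                           forehead_rect[0][0]:forehead_rect[1][0]]
assert forehead_roi.shape == (164, 280, 3)                  # (y2 - y1, x2 - x1, channels)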
infilling_python
Image_Filtering
257
258
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:']
[' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]']
[' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 257}, {'reason_category': 'Loop Body', 'usage_line': 258}]
Variable 'frame' used at line 257 is part of a Loop defined at line 256 and has a Short-Range dependency. Variable 'cheek_rect' used at line 257 is defined at line 243 and has a Medium-Range dependency. Variable 'frame' used at line 258 is part of a Loop defined at line 256 and has a Short-Range dependency. Variable 'forehead_rect' used at line 258 is defined at line 244 and has a Medium-Range dependency.
{'Loop Body': 2}
{'Variable Loop Short-Range': 2, 'Variable Medium-Range': 2}
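The horizon_categories_output and horizon_freq_analysis fields above classify each dependency by the line distance between a name's use and its definition. A minimal sketch of that classification, assuming cutoffs of 10 lines for Short-Range and 30 for Medium-Range (values inferred from the examples in these records, not stated anywhere in the data):

from collections import Counter

SHORT_MAX, MEDIUM_MAX = 10, 30  # assumed thresholds; they fit every example shown here

def horizon_label(def_line, use_line):
    dist = use_line - def_line
    if dist <= SHORT_MAX:
        return 'Short-Range'
    if dist <= MEDIUM_MAX:
        return 'Medium-Range'
    return 'Long-Range'

# (kind, definition line, usage line) for the record above
deps = [('Variable Loop', 256, 257), ('Variable', 243, 257),
        ('Variable Loop', 256, 258), ('Variable', 244, 258)]
print(Counter(f"{kind} {horizon_label(d, u)}" for kind, d, u in deps))
# Counter({'Variable Loop Short-Range': 2, 'Variable Medium-Range': 2})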
infilling_python
Image_Filtering
259
259
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]']
[' cheek_avg_green = np.mean(cheek_roi[:, :, 1])']
[' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 259}]
Library 'np' used at line 259 is imported at line 1 and has a Long-Range dependency. Variable 'cheek_roi' used at line 259 is defined at line 257 and has a Short-Range dependency.
{'Loop Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
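This record's target line, cheek_avg_green = np.mean(cheek_roi[:, :, 1]), averages channel index 1 of the region of interest; green sits at index 1 in both BGR and RGB orderings, so the statistic is the same whether or not the frame was converted. A self-contained check on synthetic data:

import numpy as np

# Synthetic 4x4 ROI whose green channel is a known constant.
roi = np.zeros((4, 4, 3), dtype=np.uint8)
roi[:, :, 1] = 120  # index 1 is green in RGB and in BGR alike

print(np.mean(roi[:, :, 1]))  # 120.0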
infilling_python
Image_Filtering
260
260
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])']
[' forehead_avg_green = np.mean(forehead_roi[:, :, 1])']
[' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 260}]
Library 'np' used at line 260 is imported at line 1 and has a Long-Range dependency. Variable 'forehead_roi' used at line 260 is defined at line 258 and has a Short-Range dependency.
{'Loop Body': 1}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
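The rectangles in these records are stored as [(x1, y1), (x2, y2)], while NumPy indexes rows (y) before columns (x), which is why the recorded slices read frame[y1:y2, x1:x2]. A small sketch with a dummy frame, reusing the forehead_rect coordinates from the records:

import numpy as np

frame = np.zeros((600, 700, 3), dtype=np.uint8)  # (H, W, C) dummy frame
rect = [(220, 10), (500, 174)]                   # [(x1, y1), (x2, y2)]

# y-coordinates select rows, x-coordinates select columns.
roi = frame[rect[0][1]:rect[1][1], rect[0][0]:rect[1][0]]
print(roi.shape)  # (164, 280, 3): height y2-y1, width x2-x1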
infilling_python
Image_Filtering
262
262
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)']
[' forehead_avg_green_values.append(forehead_avg_green)']
['', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[{'reason_category': 'Loop Body', 'usage_line': 262}]
Variable 'forehead_avg_green_values' used at line 262 is defined at line 254 and has a Short-Range dependency. Variable 'forehead_avg_green' used at line 262 is defined at line 260 and has a Short-Range dependency.
{'Loop Body': 1}
{'Variable Short-Range': 2}
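The infilled lines in this record accumulate one scalar per frame through an explicit loop. For reference, the same per-frame green means can be computed in a single vectorized call once the frames are stacked into one array; this is a sketch under the assumption that the whole clip fits in memory, not a drop-in replacement for the recorded code:

import numpy as np

video = np.random.randint(0, 256, size=(90, 480, 640, 3), dtype=np.uint8)  # (T, H, W, C)
y1, y2, x1, x2 = 250, 350, 220, 320  # cheek_rect bounds from the records

# Average the green channel over rows and columns, leaving one value per frame.
cheek_avg_green_values = video[:, y1:y2, x1:x2, 1].mean(axis=(1, 2))
print(cheek_avg_green_values.shape)  # (90,)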
infilling_python
Image_Filtering
277
277
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)']
['forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)']
['', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Function 'bandpass_filter' used at line 277 is defined at line 230 and has a Long-Range dependency. Variable 'forehead_avg_green_values' used at line 277 is defined at line 254 and has a Medium-Range dependency. Variable 'low_cutoff' used at line 277 is defined at line 271 and has a Short-Range dependency. Variable 'high_cutoff' used at line 277 is defined at line 272 and has a Short-Range dependency. Variable 'fs' used at line 277 is defined at line 273 and has a Short-Range dependency. Variable 'order' used at line 277 is defined at line 274 and has a Short-Range dependency.
{}
{'Function Long-Range': 1, 'Variable Medium-Range': 1, 'Variable Short-Range': 4}
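Line 277 in this record runs the recorded bandpass_filter (a first-order Butterworth band-pass applied with zero-phase filtfilt) over the forehead signal. A quick sanity check of that filter on a synthetic trace, keeping the 0.8-3 Hz band and fs = 30 from the records; the 1.2 Hz tone stands in for a pulse and should survive while the slow drift is attenuated:

import numpy as np
from scipy.signal import butter, filtfilt

fs = 30.0
t = np.arange(0, 20, 1 / fs)
# In-band 1.2 Hz component plus an out-of-band 0.2 Hz drift.
signal = np.sin(2 * np.pi * 1.2 * t) + 2.0 * np.sin(2 * np.pi * 0.2 * t)

b, a = butter(1, [0.8 / (0.5 * fs), 3 / (0.5 * fs)], btype='band')
filtered = filtfilt(b, a, signal)

# The filtered trace's spread should be dominated by the 1.2 Hz tone alone.
print(np.std(signal), np.std(filtered))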
infilling_python
Image_Filtering
291
291
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)']
['forehead_fft = np.fft.fft(forehead_filtered_signal)']
['print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 291 is imported at line 1 and has a Long-Range dependency. Variable 'forehead_filtered_signal' used at line 291 is defined at line 277 and has a Medium-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1}
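This record's FFT feeds the pulse-rate estimate in Part 5 of the embedded script: find the magnitude peak, convert its bin to Hertz, multiply by 60. For a real signal the magnitude spectrum is symmetric and np.argmax happens to hit the positive-frequency copy first, but masking to positive frequencies makes that intent explicit. A compact sketch on a synthetic 1.2 Hz signal:

import numpy as np

fs = 30.0
t = np.arange(0, 20, 1 / fs)
signal = np.sin(2 * np.pi * 1.2 * t)  # stand-in for a filtered pulse trace

fft_vals = np.fft.fft(signal)
freqs = np.fft.fftfreq(len(signal), d=1 / fs)

pos = freqs > 0  # keep only positive-frequency bins
peak_freq = freqs[pos][np.argmax(np.abs(fft_vals[pos]))]
print(peak_freq * 60)  # ~72 beats per minute for the 1.2 Hz tone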
infilling_python
Image_Filtering
298
298
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)']
['freq_forehead = np.fft.fftfreq(N, d=1/Fs)']
['print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at its maximum", '# Cheek heart rate will be approx 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forehead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 298 is imported at line 1 and has a Long-Range dependency. Variable 'N' used at line 298 is defined at line 295 and has a Short-Range dependency. Variable 'Fs' used at line 298 is defined at line 296 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2}
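The record above infills the frequency-axis construction (line 298, freq_forehead = np.fft.fftfreq(N, d=1/Fs)). A minimal standalone sketch of the index-to-Hertz conversion that the task's Part4 comment describes; Fs matches the record's 30 Hz sampling rate, while N = 256 is a stand-in, since the real N is the number of video frames:

```python
import numpy as np

# Minimal sketch of the DFT index -> Hertz conversion the target line performs.
# Fs mirrors the record (30 Hz); N = 256 is an assumed signal length.
Fs = 30
N = 256
freqs = np.fft.fftfreq(N, d=1/Fs)

# Bin n maps to Fs * n / N; the second half of the array holds the
# negative frequencies, exactly as the Part4 comment states.
assert np.isclose(freqs[1], Fs / N)
print(freqs[:3])   # [0.0, ~0.117, ~0.234]
```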
infilling_python
Image_Filtering
306
306
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '']
['index_max_cheek = np.argmax(np.abs(cheek_fft))']
['index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 306 is imported at line 1 and has a Long-Range dependency. Variable 'cheek_fft' used at line 306 is defined at line 290 and has a Medium-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1}
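This record and the next mirror each other: the masked line is the np.argmax(np.abs(...)) peak pick over the FFT magnitude, once for the cheek signal and once for the forehead. A self-contained sketch of that step, run on a synthetic 1.2 Hz tone (72 bpm) so the expected peak is known in advance:

```python
import numpy as np

# Self-contained sketch of the peak pick these two mirrored records infill.
# The input is a synthetic 1.2 Hz tone, not data from the dataset's video.
Fs, N = 30, 300
t = np.arange(N) / Fs
signal = np.sin(2 * np.pi * 1.2 * t)

fft = np.fft.fft(signal)
freqs = np.fft.fftfreq(N, d=1/Fs)

pos = freqs > 0                         # keep only positive-frequency bins
index_max = np.argmax(np.abs(fft[pos]))
print(freqs[pos][index_max])            # ~1.2 (Hz)
```

Note that np.argmax returns the first of tied maxima, so on a real-valued signal the positive-frequency peak (which precedes its negative twin in fftfreq ordering) is reported even without the positive-frequency mask used here; the mask just makes that choice explicit.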
infilling_python
Image_Filtering
307
307
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))']
['index_max_forehead = np.argmax(np.abs(forehead_fft))']
['', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Library 'np' used at line 307 is imported at line 1 and has a Long-Range dependency. Variable 'forehead_fft' used at line 307 is defined at line 291 and has a Medium-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Medium-Range': 1}
infilling_python
Image_Filtering
309
309
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '']
['freq_max_cheek = freq_cheek[index_max_cheek]']
['freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'freq_cheek' used at line 309 is defined at line 297 and has a Medium-Range dependency. Variable 'index_max_cheek' used at line 309 is defined at line 306 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
infilling_python
Image_Filtering
310
310
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]']
['freq_max_forehead = freq_forehead[index_max_forehead]']
['', 'heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'freq_forehead' used at line 310 is defined at line 298 and has a Medium-Range dependency. Variable 'index_max_forehead' used at line 310 is defined at line 307 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 1}
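The two freq_max_* records above and the lines 312-313 record that follows reduce to one short pipeline: index the frequency axis with the peak bin, then convert Hertz to beats per minute. A sketch with an assumed peak bin (index_max = 12 is illustrative, not a value from the dataset, since the true index depends on the recorded video):

```python
import numpy as np

# Sketch of the lookup + unit conversion the remaining records infill.
# index_max = 12 is an assumed peak bin, not taken from the dataset.
Fs, N = 30, 300
freqs = np.fft.fftfreq(N, d=1/Fs)

index_max = 12
freq_max = freqs[index_max]    # 12 * Fs / N = 1.2 Hz
heart_rate = freq_max * 60     # Hz -> beats per minute
print(f"{heart_rate:.2f} beats per minute")   # 72.00 beats per minute
```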
infilling_python
Image_Filtering
312
313
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '']
['heart_rate_cheek = (freq_max_cheek) * 60', 'heart_rate_forehead = (freq_max_forehead) * 60']
['', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'freq_max_cheek' used at line 312 is defined at line 309 and has a Short-Range dependency. Variable 'freq_max_forehead' used at line 313 is defined at line 310 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2}
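The three Image_Filtering records in this stretch all fill the same pulse-rate step: the dominant frequency of the bandpass-filtered green-channel signal, in Hz, times 60 gives beats per minute. A minimal self-contained sketch of that conversion, assuming a synthetic 1.2 Hz (72 bpm) sinusoid in place of the alice.avi signal the contexts read:

import numpy as np

# Assumed stand-in for the filtered average-green signal: a 1.2 Hz
# sinusoid sampled at fs = 30 Hz for 20 s (600 samples).
fs = 30
t = np.arange(0, 20, 1 / fs)
signal = np.sin(2 * np.pi * 1.2 * t)

fft_vals = np.fft.fft(signal)
freqs = np.fft.fftfreq(len(signal), d=1 / fs)

# The strongest DFT magnitude marks the dominant frequency; abs() on the
# frequency guards against argmax landing on the negative-frequency twin.
peak = np.argmax(np.abs(fft_vals))
heart_rate = abs(freqs[peak]) * 60  # Hz -> beats per minute
print(f"Estimated pulse: {heart_rate:.2f} bpm")  # 72.00 for this input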
infilling_python
Image_Filtering
312
312
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '']
['heart_rate_cheek = (freq_max_cheek) * 60']
['heart_rate_forehead = (freq_max_forehead) * 60', '', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'freq_max_cheek' used at line 312 is defined at line 309 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
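The Part4 comment in these contexts states the index-to-Hertz conversion as Fs * n / N and points at numpy.fft.fftfreq as a shortcut. A small check, under the Fs = 30 the records assume, that the two agree up to the upper half of the manual values wrapping around to negative frequencies:

import numpy as np

N, Fs = 10, 30
manual = np.array([Fs * n / N for n in range(N)])
auto = np.fft.fftfreq(N, d=1 / Fs)

print(manual)  # [ 0.  3.  6.  9. 12. 15. 18. 21. 24. 27.]
print(auto)    # [  0.   3.   6.   9.  12. -15. -12.  -9.  -6.  -3.]
# Bins k >= N/2 alias down by Fs: 15 - 30 = -15, 18 - 30 = -12, and so on.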
infilling_python
Image_Filtering
313
313
['import numpy as np', 'import cv2', 'import matplotlib.pyplot as plt', 'from scipy.signal import convolve2d', 'from scipy.signal import butter, filtfilt', '', '# Task1', '# Gaussian blurring einstein monroe illusion', 'def gaussian2D(sigma, kernel_size):', ' sigma_x, sigma_y = sigma', ' size_x, size_y = kernel_size', '', ' size_x = int(size_x) // 2 * 2 + 1', ' size_y = int(size_y) // 2 * 2 + 1', '', ' x, y = np.meshgrid(np.linspace(-3*sigma_x, 3*sigma_x, size_x),', ' np.linspace(-3*sigma_y, 3*sigma_y, size_y))', ' ', ' kernel = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))', ' kernel /= 2 * np.pi * sigma_x * sigma_y', ' kernel /= kernel.sum()', ' return kernel', '', 'def center_crop(image, target_size):', ' image = np.array(image)', ' h, w = image.shape[:2]', '', ' left = (w - target_size[0]) // 2', ' top = (h - target_size[1]) // 2', '', ' right = left + target_size[0]', ' bottom = top + target_size[1]', '', ' cropped_image = image[top:bottom, 0:right]', ' return cropped_image', '', "img_a = cv2.imread('./marilyn.jpeg')", "img_b = cv2.imread('./einstein.jpeg')", '', '# Part1', '# Reshape to ensure images are same size by center cropping to image with smallest dimension', '# Convert img to grayscale ', 'smallest_dim = min(img_a.shape[0],img_a.shape[1],img_b.shape[1],img_b.shape[1])', 'img_a_gray = center_crop(cv2.cvtColor(img_a, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'img_b_gray = center_crop(cv2.cvtColor(img_b, cv2.COLOR_BGR2GRAY),(smallest_dim, smallest_dim))', 'print(np.array(img_a_gray))', 'print(np.array(img_b_gray))', '', '# Part2', '# Apply Gaussian filter to both images choose relevant sigma and kernel size to achieve desired results', '# Use custom gaussian2D function and use cv2.filter2D to apply the filter to the image', 'sigma_a = (1, 1)', 'kernel_size_a = (11, 11)', 'gaussian_kernel_a = gaussian2D(sigma_a, kernel_size_a)', '', 'sigma_b = (1, 1)', 'kernel_size_b = (11, 11)', 'gaussian_kernel_b = gaussian2D(sigma_b, kernel_size_b)', '', 'blur_a = cv2.filter2D(img_a_gray, -1, gaussian_kernel_a)', 'blur_b = cv2.filter2D(img_b_gray, -1, gaussian_kernel_b)', '', 'a_diff = img_a_gray - blur_a', 'img_c = blur_b + a_diff', 'print(img_c)', '', 'def downsample_image(image, factor):', ' if factor <= 0:', ' raise ValueError("Downsampling factor must be greater than 0.")', ' ', ' height, width = image.shape[:2]', ' new_height = height // factor', ' new_width = width // factor', '', ' if len(image.shape) == 3:', ' downsampled_image = np.zeros((new_height, new_width, image.shape[2]), dtype=np.uint8)', ' else:', ' downsampled_image = np.zeros((new_height, new_width), dtype=np.uint8)', '', ' for i in range(new_height):', ' for j in range(new_width):', ' downsampled_image[i, j] = image[i * factor, j * factor]', ' return downsampled_image', '', 'downsampling_factor = 4', 'downsampled_image = downsample_image(img_c, downsampling_factor)', 'print(np.array(downsampled_image))', '', '# Part3', '# Computer fourier magnitude for the final image, original grayscale images, ', '# the blurred second image and difference between grayscale first image and blurred first image', '', 'def compute_fourier_magnitude(image):', ' spectrum = np.abs(np.fft.fftshift(np.fft.fft2(image)))', ' log_spectrum = np.log(1 + spectrum)', ' return log_spectrum', '', 'spectrum_A = compute_fourier_magnitude(img_a_gray)', 'spectrum_B = compute_fourier_magnitude(img_b_gray)', 'spectrum_blurred_B = compute_fourier_magnitude(blur_b)', 'spectrum_A_blur_A = compute_fourier_magnitude(a_diff)', 'spectrum_C = 
compute_fourier_magnitude(img_c)', 'print(spectrum_A)', 'print(spectrum_B)', 'print(spectrum_A_blur_A)', 'print(spectrum_blurred_B)', 'print(spectrum_C)', '', '# Blend two images with a verticle blend in the middle with laplacian pyramids', '# Part 1 vertical blending halfway through image', "apple = cv2.imread('./apple.jpeg')", "orange = cv2.imread('./orange.jpeg')", 'A = cv2.resize(apple, (256,256), fx=0.5, fy=0.5)', 'B = cv2.resize(orange, (256,256), fx=0.5, fy=0.5)', '', 'G = A.copy()', 'gpA = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpA.append(G)', '', '# Gaussian pyramid for B', 'G = B.copy()', 'gpB = [G]', 'for i in range(6):', ' G = cv2.pyrDown(G)', ' gpB.append(G)', '', '# Laplacian Pyramid for A and B', 'lpA = [gpA[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpA[i])', ' L = cv2.subtract(gpA[i-1],GE)', ' lpA.append(L)', '', 'lpB = [gpB[5]]', 'for i in range(5,0,-1):', ' GE = cv2.pyrUp(gpB[i])', ' L = cv2.subtract(gpB[i-1],GE)', ' lpB.append(L)', '', '# Add left and right halves of images in each level', 'LS = []', 'for la,lb in zip(lpA,lpB):', ' rows,cols,dpt = la.shape', ' ls = np.hstack((la[:,0:cols//2], lb[:,cols//2:]))', ' LS.append(ls)', '', '# Reconstruct', 'ls_ = LS[0]', 'for i in range(1,6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each half', 'real = np.hstack((A[:,:cols//2],B[:,cols//2:]))', '', 'blended_rgb = cv2.cvtColor(ls_, cv2.COLOR_BGR2RGB)', 'original_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(blended_rgb)', 'print(original_rgb)', '', '# Part 2', '# Blend the image diagonally in a strip following the same steps as above ', '# to accomplish diagnoal blending, use a diagonal mask ', '# Create diagonal mask', 'def create_diagonal_mask(shape, strip_width=200):', ' mask = np.zeros(shape, dtype=np.float32)', ' height, width, _ = mask.shape', ' for i in range(min(height, width)):', ' mask[i, max(0, i - strip_width // 2):min(width, i + strip_width // 2), :] = 1.0', ' return mask', '', '# Now blend images using the diagonal mask', 'LS = []', 'mask = create_diagonal_mask(A.shape)', 'M = mask.copy()', 'gpmask = [M]', 'for i in range(5):', ' M = cv2.pyrDown(M)', ' gpmask.append(M)', 'gpmask.reverse()', 'for i in range(len(gpmask)):', ' rows, cols, dpt = lpA[i].shape', ' ls = lpA[i] * gpmask[i] + lpB[i] * (1 - gpmask[i])', ' LS.append(ls)', '', '# Now reconstruct', 'ls_ = LS[0]', 'for i in range(1, 6):', ' ls_ = cv2.pyrUp(ls_)', ' ls_ = cv2.resize(ls_, (LS[i].shape[1], LS[i].shape[0])) ', ' ls_ = cv2.add(ls_, LS[i])', '', '# Image with direct connecting each diagonal half', 'real = np.hstack((A[:, :cols//2], B[:, cols//2:]))', 'ls_rgb = cv2.cvtColor(ls_.astype(np.uint8), cv2.COLOR_BGR2RGB)', 'real_rgb = cv2.cvtColor(real, cv2.COLOR_BGR2RGB)', '', 'print(ls_rgb)', 'print(real_rgb)', 'print(mask)', '', '', '# Task3', '# Part1', '# Read in a video file in .avi format, choose areas of the face to focus on via bounding box', '# Apply a bandpass filter to the specified regions of the interest based on a lower and upper bound', 'def read_video_into_numpy(filename):', ' cap = cv2.VideoCapture(filename)', ' frames = []', ' while cap.isOpened():', ' ret, frame = cap.read()', ' # if frame is read correctly ret is True', ' if not ret:', ' print("Can\'t receive frame (stream end?). 
Exiting ...")', ' break', '', ' frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)', ' frames.append(frame)', '', ' cap.release()', ' # Converts to numpy array(T,H,W,C)', ' video = np.stack(frames, axis=0)', ' # (T,H,W,C)->(H,W,C,T)', ' video = np.transpose(video, (1,2,3,0))', ' return frames', '', 'def bandpass_filter(signal, low_cutoff, high_cutoff, fs, order):', ' nyquist = 0.5 * fs', ' low = low_cutoff / nyquist', ' high = high_cutoff / nyquist', " b, a = butter(order, [low, high], btype='band')", ' filtered_signal = filtfilt(b, a, signal)', ' return filtered_signal', '', "alice = './alice.avi'", 'video_frames = read_video_into_numpy(alice)', 'first_frame = video_frames[0]', '', '# Specify regions of interest', 'cheek_rect = [(220, 250), (320, 350)]', 'forehead_rect = [(220, 10), (500, 174)]', 'cheek_roi = first_frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', 'forehead_roi = first_frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', '', 'print(cheek_roi)', 'print(forehead_roi)', '', '# Part 2', '# Find the average green value for each frame in the cheek and forhead region of interest', 'cheek_avg_green_values = []', 'forehead_avg_green_values = []', '', 'for frame in video_frames:', ' cheek_roi = frame[cheek_rect[0][1]:cheek_rect[1][1], cheek_rect[0][0]:cheek_rect[1][0]]', ' forehead_roi = frame[forehead_rect[0][1]:forehead_rect[1][1], forehead_rect[0][0]:forehead_rect[1][0]]', ' cheek_avg_green = np.mean(cheek_roi[:, :, 1])', ' forehead_avg_green = np.mean(forehead_roi[:, :, 1])', ' cheek_avg_green_values.append(cheek_avg_green)', ' forehead_avg_green_values.append(forehead_avg_green)', '', 'print(cheek_avg_green_values)', 'print(forehead_avg_green_values)', '', '# Part3', '# Set a lower and upper threshold and apply a bandpass filter to the average green values of cheek and forward ', '# Set fs to 30', '', 'low_cutoff = 0.8', 'high_cutoff = 3', 'fs = 30', 'order = 1', '', 'cheek_filtered_signal = bandpass_filter(cheek_avg_green_values, low_cutoff, high_cutoff, fs, order)', 'forehead_filtered_signal = bandpass_filter(forehead_avg_green_values, low_cutoff, high_cutoff, fs, order)', '', 'print(cheek_filtered_signal)', 'print(forehead_filtered_signal)', '', '# Part4', '# Plot the Fourier magnitudes of these two signals using the DFT, where the x-axis is', '# frequency (in Hertz) and y-axis is amplitude. DFT coefficients are ordered in terms of', '# integer indices, so you will have to convert the indices into Hertz. For each index n = [-', '# N/2, N/2], the corresponding frequency is Fs * n / N, where N is the length of your signal', '# and Fs is the sampling rate of the signal (30 Hz in this case). 
You can also use', '# numpy.fft.fftfreq to do this conversion for you.', '', 'cheek_fft = np.fft.fft(cheek_filtered_signal)', 'forehead_fft = np.fft.fft(forehead_filtered_signal)', 'print(cheek_fft)', 'print(forehead_fft)', '', 'N = len(cheek_filtered_signal)', 'Fs = 30', 'freq_cheek = np.fft.fftfreq(N, d=1/Fs)', 'freq_forehead = np.fft.fftfreq(N, d=1/Fs)', 'print(np.abs(freq_cheek))', 'print(np.abs(freq_forehead))', '', '# Part5', "# Estimate the pulse rate by finding the index where np.abs(cheek_fft) is at it's maximum", '# Cheek heart rate will be aprox 60*freq_cheek[index of max np.abs(cheek_fft)] -> same idea with forhead', '', 'index_max_cheek = np.argmax(np.abs(cheek_fft))', 'index_max_forehead = np.argmax(np.abs(forehead_fft))', '', 'freq_max_cheek = freq_cheek[index_max_cheek]', 'freq_max_forehead = freq_forehead[index_max_forehead]', '', 'heart_rate_cheek = (freq_max_cheek) * 60']
['heart_rate_forehead = (freq_max_forehead) * 60']
['', 'print(f"Heart Rate (Cheek): {heart_rate_cheek:.2f} beats per minute")', 'print(f"Heart Rate (Forehead): {heart_rate_forehead:.2f} beats per minute")']
[]
Variable 'freq_max_forehead' used at line 313 is defined at line 310 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
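All three Image_Filtering contexts define the same bandpass_filter helper, which normalizes the 0.8 and 3 Hz cutoffs by the Nyquist frequency before calling scipy.signal.butter. A short sketch of that normalization; the band edges, filter order, and fs = 30 come from the records, while the test signal and its components are assumptions:

import numpy as np
from scipy.signal import butter, filtfilt

def bandpass(signal, low_cutoff, high_cutoff, fs, order=1):
    # butter() expects critical frequencies as fractions of Nyquist (fs / 2),
    # so 0.8 Hz at fs = 30 becomes 0.8 / 15 ~= 0.053.
    nyquist = 0.5 * fs
    b, a = butter(order, [low_cutoff / nyquist, high_cutoff / nyquist], btype='band')
    # filtfilt runs the filter forward and backward, so the output keeps
    # zero phase lag relative to the input.
    return filtfilt(b, a, signal)

fs = 30
t = np.arange(0, 20, 1 / fs)
# One component inside the 0.8-3 Hz band plus a slow drift outside it.
raw = np.sin(2 * np.pi * 1.2 * t) + 2.0 * np.sin(2 * np.pi * 0.1 * t)
filtered = bandpass(raw, 0.8, 3.0, fs)
print(filtered[:5])  # the drift is attenuated; the 1.2 Hz component remains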
infilling_python
Credit_Scoring_Fairness
23
24
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset']
['bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)']
['df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', 
accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", 
"x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", 
"test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / 
(confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", 
"print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = 
confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', 
"print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Library 'pd' used at line 23 is imported at line 2 and has a Medium-Range dependency. Variable 'BANK_PATH' used at line 23 is defined at line 20 and has a Short-Range dependency. Library 'pd' used at line 24 is imported at line 2 and has a Medium-Range dependency. Variable 'bank' used at line 24 is defined at line 23 and has a Short-Range dependency.
{}
{'Library Medium-Range': 2, 'Variable Short-Range': 2}
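The Credit_Scoring_Fairness contexts derive every group-level number from a 2x2 confusion matrix, with accuracy as (cm[0][0] + cm[1][1]) / total and, per their comments, TPR = TP/(TP+FN) and FNR = FN/(FN+TP). A compact sketch of those named formulas on toy labels (the labels are assumptions standing in for the bankfullclean.csv split); note that in sklearn's layout rows are true labels and columns are predictions, ordered 0 then 1, so TP for class 1 sits at cm[1][1]:

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 0, 1, 1, 1, 1, 0])
y_pred = np.array([0, 1, 0, 1, 0, 1, 1, 0])

cm = confusion_matrix(y_true, y_pred)
tn, fp, fn, tp = cm.ravel()  # sklearn layout: [[TN, FP], [FN, TP]]

accuracy = (tn + tp) / cm.sum()  # same as (cm[0][0] + cm[1][1]) / total
tpr = tp / (tp + fn)             # compared across groups for Equalized Opportunity
fnr = fn / (fn + tp)             # note TPR + FNR = 1 by construction
print(cm)
print(f"accuracy={accuracy:.3f}  TPR={tpr:.3f}  FNR={fnr:.3f}")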
infilling_python
Credit_Scoring_Fairness
50
51
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model']
['gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)']
['', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', 
"print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, 
y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 
'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", 
"x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = 
x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', 
"print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', 
confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Library 'GaussianNB' used at line 50 is imported at line 7 and has a Long-Range dependency. Variable 'gnb' used at line 51 is defined at line 50 and has a Short-Range dependency. Variable 'x_train' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'y_train' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'x_test' used at line 51 is defined at line 39 and has a Medium-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1, 'Variable Medium-Range': 3}
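The dependency annotations above classify each reference on the target line by how far back its definition sits in the source script: 'gnb' is defined one line earlier (Short-Range), the train/test splits a dozen lines earlier (Medium-Range), and the GaussianNB import over forty lines earlier (Long-Range). The exact range cutoffs are not stated in the record; the distances below are read off its own line numbers. A self-contained sketch of the annotated span, with synthetic stand-in data replacing the dummy-encoded bank features:

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB  # imported far above its use: the Long-Range dependency

# Synthetic stand-ins for the record's features and binary target 'biY'.
X = np.random.RandomState(1234).rand(100, 5)
y = (X[:, 0] > 0.5).astype(int)
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1234)  # defined ~12 lines back in the real script: Medium-Range

gnb = GaussianNB()                                    # defined immediately above its use: Short-Range
gnb_pred = gnb.fit(x_train, y_train).predict(x_test)  # the infill target (line 51 of the source script)
print(gnb_pred[:10])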
infilling_python
Credit_Scoring_Fairness
51
51
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()']
['gnb_pred = gnb.fit(x_train, y_train).predict(x_test)']
['', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', 
"print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, 
y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 
'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", 
"x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = 
x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', 
"print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', 
confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 51 is defined at line 50 and has a Short-Range dependency. Variable 'x_train' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'y_train' used at line 51 is defined at line 39 and has a Medium-Range dependency. Variable 'x_test' used at line 51 is defined at line 39 and has a Medium-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Medium-Range': 3}
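The context code in these records repeatedly derives demographic parity, TPR, and FNR from individual confusion-matrix cells. A minimal sketch of those quantities on made-up labels, using sklearn's documented orientation (rows are true labels, columns are predictions, so ravel() yields tn, fp, fn, tp for binary data). Worth noting: under that orientation cm[0,0] is TN and cm[1,0] is FN, so the context code's cm[0,0]-based "TPR" is better read as TN/(TN+FN), and its "PR" (TP + TN) is the numerator of accuracy rather than a positive rate:

import numpy as np
from sklearn.metrics import confusion_matrix

# Illustrative labels and predictions only.
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0, 1, 0])

cm = confusion_matrix(y_true, y_pred)  # rows: true label, columns: predicted label
tn, fp, fn, tp = cm.ravel()            # sklearn's cell order for binary labels

positive_rate = (y_pred == 1).mean()   # demographic parity compares P(pred = 1) across groups
tpr = tp / (tp + fn)                   # equalized opportunity compares this across groups
fnr = fn / (fn + tp)                   # equalized-odds companion rate; fnr == 1 - tpr
accuracy = (tp + tn) / cm.sum()        # what the context code's 'PR' actually counts, normalized
print(positive_rate, tpr, fnr, accuracy)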
infilling_python
Credit_Scoring_Fairness
56
56
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test"]
["test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)"]
["test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 
'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = 
pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + 
confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop 
the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = 
test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', 
"print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + 
confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'test_df' used at line 56 is defined at line 55 and has a Short-Range dependency. Library 'pd' used at line 56 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred' used at line 56 is defined at line 51 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2, 'Library Long-Range': 1}
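Note on the record above: its 'after' field repeatedly derives accuracy, demographic parity, equalized opportunity (TPR), and equalized odds (TPR/FNR) for each marital-status subset by indexing raw confusion-matrix cells, once per model. A minimal sketch of that arithmetic factored into one helper follows; the function name group_metrics and the rate-style demographic-parity formula are illustrative assumptions, not part of the task code (the task code sums the raw diagonal counts cm[0,0] + cm[1,1] rather than normalising by group size).

from sklearn.metrics import confusion_matrix

def group_metrics(y_true, y_pred):
    # Binary confusion matrix in scikit-learn's convention:
    # rows index the true label, columns the predicted label.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    total = tn + fp + fn + tp
    return {
        'accuracy': (tp + tn) / total,                         # fraction classified correctly
        'positive_rate': (tp + fp) / total,                    # demographic parity, rate form (assumption)
        'tpr': tp / (tp + fn) if (tp + fn) else float('nan'),  # equalized opportunity
        'fnr': fn / (fn + tp) if (fn + tp) else float('nan'),  # paired with TPR for equalized odds
    }

# Hypothetical usage on one subset, mirroring the record's test_df columns:
# print(group_metrics(test_df['biY'], test_df['pred']))

Comparing the married/single/divorced subsets then reduces to three calls to one audited formula, which avoids the copy-paste drift visible in the record (e.g. a Random Forest accuracy stored under the variable name accuracy_naive_bayes_divorced).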
infilling_python
Credit_Scoring_Fairness
61
61
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix']
["confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', 
TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on 
divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, 
axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == 
test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear 
Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Library 'confusion_matrix' used at line 61 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 61 is defined at line 57 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
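The ground-truth line for this record is the confusion_matrix call itself, and all of the downstream cm[i][j] arithmetic in these tasks leans on scikit-learn's index convention, so it is worth pinning down. A self-contained check, with toy labels invented purely for illustration:

from sklearn.metrics import confusion_matrix

# Toy binary labels chosen to populate all four cells.
y_true = [0, 0, 0, 1, 1, 1]
y_pred = [0, 1, 0, 1, 1, 0]

cm = confusion_matrix(y_true, y_pred)
# Rows index the true class, columns the predicted class:
# cm[0, 0] -> true negatives,  cm[0, 1] -> false positives,
# cm[1, 0] -> false negatives, cm[1, 1] -> true positives.
print(cm)                                # [[2 1]
                                         #  [1 2]]
print((cm[0, 0] + cm[1, 1]) / cm.sum())  # 0.666..., the accuracy formula used in the task code

Note that the TPR/FNR comments in these records read cm[0,0] as TP, which is only literal if the positive outcome is encoded as 0; under the usual positive=1 encoding, cm[0,0] counts true negatives.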
infilling_python
Credit_Scoring_Fairness
64
64
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy']
['accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])']
["print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = 
confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] 
+ confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", 
"test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', 
"test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / 
(confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 
'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", 
"test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_mat' used at line 64 is defined at line 61 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
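The Credit_Scoring_Fairness snippets recorded above derive demographic parity, equal-opportunity (TPR) and equalized-odds (FNR) figures directly from individual confusion-matrix entries. For reference, here is a minimal standalone sketch of the same bookkeeping, using scikit-learn's documented convention that confusion_matrix returns C with C[i, j] = number of samples whose true label is i and predicted label is j (so for binary labels tn, fp, fn, tp = C.ravel()). The helper name and the toy labels are illustrative only and are not taken from the dataset:

import numpy as np
from sklearn.metrics import confusion_matrix

def fairness_metrics(y_true, y_pred):
    # Binary labels {0, 1}: tn = C[0,0], fp = C[0,1], fn = C[1,0], tp = C[1,1]
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    total = tn + fp + fn + tp
    return {
        'accuracy': (tp + tn) / total,          # same accuracy formula used in the rows above
        'positive_rate': (tp + fp) / total,     # share predicted positive (demographic parity)
        'TPR': tp / (tp + fn) if (tp + fn) else float('nan'),  # equal opportunity: TP / (TP + FN)
        'FNR': fn / (fn + tp) if (fn + tp) else float('nan'),  # equalized odds, together with TPR
    }

# Toy example only -- not data from the dataset
y_true = np.array([0, 0, 1, 1, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0])
print(fairness_metrics(y_true, y_pred))

Comparing groups (here: the married, single and divorced test subsets) then amounts to evaluating such a helper once per subset and inspecting the gaps between the resulting rates.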
infilling_python
Credit_Scoring_Fairness
74
74
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model']
['gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)']
['test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', 
FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = 
pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + 
confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = 
(confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = 
test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop 
the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 74 is defined at line 50 and has a Medium-Range dependency. Variable 'x_train' used at line 74 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 74 is defined at line 39 and has a Long-Range dependency. Variable 'test_single' used at line 74 is defined at line 70 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Long-Range': 2, 'Variable Short-Range': 1}
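The horizon_categories_output strings in these rows label each variable use by the line distance between the use and its definition (Short-, Medium- or Long-Range), and horizon_freq_analysis simply tallies those labels. Below is a minimal sketch of that bucketing; the cutoffs are assumptions chosen only to be consistent with the labels visible in this section (distances 3-4 labelled Short-Range, 24 Medium-Range, 35 Long-Range), since the thresholds the dataset actually uses are not stated here:

from collections import Counter

def classify_dependency_range(use_line, def_line):
    # Assumed cutoffs, inferred from the examples in this section; not authoritative.
    distance = abs(use_line - def_line)
    if distance <= 10:
        return 'Variable Short-Range'
    if distance <= 30:
        return 'Variable Medium-Range'
    return 'Variable Long-Range'

# (use, definition) line pairs from the annotation in the row above:
# gnb 74/50, x_train 74/39, y_train 74/39, test_single 74/70
pairs = [(74, 50), (74, 39), (74, 39), (74, 70)]
print(Counter(classify_dependency_range(u, d) for u, d in pairs))
# Counts match the row's horizon_freq_analysis field:
# 1 Medium-Range, 2 Long-Range, 1 Short-Range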
infilling_python
Credit_Scoring_Fairness
77
77
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test"]
["test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)"]
["test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', 
"print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic 
Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', 
accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# 
Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'test_df' used at line 77 is defined at line 76 and has a Short-Range dependency. Library 'pd' used at line 77 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred_single' used at line 77 is defined at line 74 and has a Short-Range dependency.
{}
{'Variable Short-Range': 2, 'Library Long-Range': 1}
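
The record above quotes a script whose demographic-parity block is computed as '# positive rate = TP + TN', i.e. a raw count of correct predictions per marital-status group. Demographic parity is conventionally compared on the positive prediction rate, P(pred = 1 | group), which also removes the group-size effect baked into raw counts. A minimal sketch of that rate, assuming sklearn's 2x2 confusion-matrix layout [[TN, FP], [FN, TP]]; the helper name is illustrative, not part of the quoted script:

import numpy as np

def positive_prediction_rate(cm):
    # cm is a 2x2 sklearn-style confusion matrix: [[TN, FP], [FN, TP]].
    tn, fp, fn, tp = np.asarray(cm).ravel()
    # Share of the group that the model labels positive.
    return (tp + fp) / (tn + fp + fn + tp)

Comparing, say, positive_prediction_rate(confusion_matrix_married) against positive_prediction_rate(confusion_matrix_single) then tests demographic parity on rates rather than on counts.
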
infilling_python
Credit_Scoring_Fairness
79
79
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])"]
["confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / 
(confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = 
(confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', 
"print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = 
rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 
'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', 
"x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Library 'confusion_matrix' used at line 79 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 79 is defined at line 78 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 1}
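
These records also quote TPR and FNR expressions of the form cm[0,0] / (cm[0,0] + cm[1,0]). Under sklearn's convention for labels {0, 1}, rows are true labels and columns are predictions, so cm[0,0] is TN and cm[1,0] is FN; the quoted expression is therefore TN / (TN + FN) rather than the TP / (TP + FN) stated in the comments. A hedged sketch of the stated formulas TPR = TP/(TP+FN) and FNR = FN/(FN+TP), using only the documented confusion_matrix call (the function name is illustrative):

from sklearn.metrics import confusion_matrix

def tpr_fnr(y_true, y_pred):
    # ravel() flattens [[TN, FP], [FN, TP]] into tn, fp, fn, tp.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    tpr = tp / (tp + fn) if (tp + fn) else float('nan')  # true positive rate
    fnr = fn / (fn + tp) if (fn + tp) else float('nan')  # complement: 1 - TPR
    return tpr, fnr

Equalized opportunity then compares tpr across the married/single/divorced subsets, and equalized odds additionally compares the error rates between the same groups.
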
infilling_python
Credit_Scoring_Fairness
81
81
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)"]
['accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])']
["print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", 
"x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", 
'', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', 
PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = 
confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test 
on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", 
"x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_single' used at line 81 is defined at line 79 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
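The Short/Medium/Long-Range labels in the dependency sentence above are evidently a function of the distance between the line where a name is used and the line where it was defined or imported, but the dump never states the cutoffs. The sketch below reproduces the labels visible in this section under assumed thresholds (at most 10 lines for Short-Range, at most 30 for Medium-Range, Long-Range beyond that); the thresholds are a guess fitted to the examples here, not a documented rubric.

from collections import Counter

def classify_range(use_line, def_line):
    # Assumed cutoffs, fitted to the labels in this dump: the distances
    # seen here are 1-4 (Short-Range), 16 (Medium-Range), and 35+ (Long-Range).
    distance = use_line - def_line
    if distance <= 10:
        return 'Short-Range'
    if distance <= 30:
        return 'Medium-Range'
    return 'Long-Range'

# The single dependency recorded for the task above: (kind, used at, defined at).
deps = [('Variable', 81, 79)]
freq = Counter('%s %s' % (kind, classify_range(u, d)) for kind, u, d in deps)
print(dict(freq))  # {'Variable Short-Range': 1}

The same cutoffs also reproduce the frequency dictionary of the next record below (four Long-Range and six Short-Range variable uses, two Medium-Range variable uses, two Long-Range library uses).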
infilling_python
Credit_Scoring_Fairness
85
90
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model']
['gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for 
x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = 
(confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', 
"print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, 
y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] 
+ confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 85 is defined at line 50 and has a Long-Range dependency. Variable 'x_train' used at line 85 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 85 is defined at line 39 and has a Long-Range dependency. Variable 'test_married' used at line 85 is defined at line 69 and has a Medium-Range dependency. Variable 'test_married' used at line 86 is defined at line 69 and has a Medium-Range dependency. Variable 'test_df' used at line 87 is defined at line 86 and has a Short-Range dependency. Variable 'y_test' used at line 87 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 88 is defined at line 86 and has a Short-Range dependency. Library 'pd' used at line 88 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred_married' used at line 88 is defined at line 85 and has a Short-Range dependency. Variable 'test_df' used at line 88 is defined at line 87 and has a Short-Range dependency. Variable 'test_df' used at line 89 is defined at line 88 and has a Short-Range dependency. Library 'confusion_matrix' used at line 90 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 90 is defined at line 86 and has a Short-Range dependency.
{}
{'Variable Long-Range': 4, 'Variable Medium-Range': 2, 'Variable Short-Range': 6, 'Library Long-Range': 2}
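The frequency dictionary above is a tally of the sentence-form analysis two lines earlier, keyed by dependency kind and range. Below is a minimal sketch of that aggregation, assuming the sentence shape used throughout this dump ("<Kind> '<name>' used at line N ... has a <Range>-Range dependency."); the actual tooling behind these records is not shown, so the pattern is an assumption fitted to that format.

import re
from collections import Counter

# Two sentences copied from the analysis above; the full listing works the same way.
analysis = ("Variable 'gnb' used at line 85 is defined at line 50 and has a "
            "Long-Range dependency. Library 'pd' used at line 88 is imported "
            "at line 2 and has a Long-Range dependency.")

pattern = r"(Variable|Library) '[^']+' used at line \d+ .+? has a (Short|Medium|Long)-Range dependency"
freq = Counter('%s %s-Range' % (kind, rng) for kind, rng in re.findall(pattern, analysis))
print(dict(freq))  # {'Variable Long-Range': 1, 'Library Long-Range': 1}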
infilling_python
Credit_Scoring_Fairness
92
92
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)"]
['accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])']
["print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', 
"x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 
1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 
'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = 
confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic 
Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_svm_divorced: ', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + 
confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_married' used at line 92 is defined at line 90 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
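A note on the fairness metrics computed inside these rows: the snippets read TP from confusion_matrix_...[0,0], but sklearn's confusion_matrix orders classes by sorted label, so for labels {0, 1} the layout is [[TN, FP], [FN, TP]]. Under that layout the rows' cm[0,0] / (cm[0,0] + cm[1,0]) is TN/(TN+FN) rather than a true-positive rate, and "positive rate = TP + TN" counts correct predictions rather than predicted positives. A minimal sketch of the conventional computation, assuming that layout (the group_metrics helper below is illustrative and not part of any row's code):

from sklearn.metrics import confusion_matrix

def group_metrics(y_true, y_pred):
    # Force a 2x2 matrix even if a subgroup is missing one class.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    total = tn + fp + fn + tp
    return {
        'positive_rate': (tp + fp) / total,                    # demographic parity
        'tpr': tp / (tp + fn) if (tp + fn) else float('nan'),  # equalized opportunity
        'fnr': fn / (fn + tp) if (fn + tp) else float('nan'),  # paired with FPR for equalized odds
    }

# Toy usage: group_metrics([0, 1, 1, 0, 1], [0, 1, 0, 1, 1])
# -> {'positive_rate': 0.6, 'tpr': 0.667, 'fnr': 0.333}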
infilling_python
Credit_Scoring_Fairness
96
101
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model']
['gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single 
= gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", 
"print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', 
inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = 
svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / 
(confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: 
', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 96 is defined at line 50 and has a Long-Range dependency. Variable 'x_train' used at line 96 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 96 is defined at line 39 and has a Long-Range dependency. Variable 'test_divorced' used at line 96 is defined at line 71 and has a Medium-Range dependency. Variable 'test_divorced' used at line 97 is defined at line 71 and has a Medium-Range dependency. Variable 'test_df' used at line 98 is defined at line 97 and has a Short-Range dependency. Variable 'y_test' used at line 98 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 99 is defined at line 98 and has a Short-Range dependency. Library 'pd' used at line 99 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred_divorced' used at line 99 is defined at line 96 and has a Short-Range dependency. Variable 'test_df' used at line 99 is defined at line 98 and has a Short-Range dependency. Variable 'test_df' used at line 100 is defined at line 99 and has a Short-Range dependency. Library 'confusion_matrix' used at line 101 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 101 is defined at line 100 and has a Short-Range dependency.
{}
{'Variable Long-Range': 4, 'Variable Medium-Range': 2, 'Variable Short-Range': 6, 'Library Long-Range': 2}
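The horizon_categories_output annotations above label each use/definition pair as Short-, Medium-, or Long-Range by the line distance between the use and its definition. The exact cutoffs are not stated in these rows; the sketch below is a hypothetical reconstruction with assumed thresholds of 10 and 30 lines, chosen only to agree with the distances visible here (1 and 2 are Short-Range, 25 is Medium-Range, 46, 57 and 97 are Long-Range):

def horizon_category(use_line, def_line):
    # Classify a dependency by how far the use sits from its definition.
    distance = use_line - def_line
    if distance <= 10:    # assumed cutoff
        return 'Short-Range'
    if distance <= 30:    # assumed cutoff
        return 'Medium-Range'
    return 'Long-Range'

# e.g. horizon_category(96, 71) -> 'Medium-Range' (distance 25, matching the annotation above)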
infilling_python
Credit_Scoring_Fairness
103
103
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)"]
['accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])']
["print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = 
confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', 
"print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = 
x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = 
confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = 
confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', 
accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_divorced', used at line 103, is defined at line 101 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
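The horizon annotations above label each variable dependency by the line distance between its definition and its use. A minimal sketch of such a classifier follows; the cutoffs of 10 and 30 lines are assumptions chosen only to be consistent with the three distances that appear in this section (2 lines -> Short-Range, 18 -> Medium-Range, 31 -> Long-Range), not thresholds documented by the dataset.

    # Hypothetical dependency-range classifier. The cutoffs below are
    # assumptions consistent with the records in this section, not values
    # taken from the dataset's own tooling.
    def classify_dependency_range(def_line: int, use_line: int) -> str:
        distance = use_line - def_line
        if distance <= 10:    # assumed Short-Range cutoff
            return 'Short-Range'
        if distance <= 30:    # assumed Medium-Range cutoff
            return 'Medium-Range'
        return 'Long-Range'

    print(classify_dependency_range(101, 103))  # Short-Range (distance 2)
    print(classify_dependency_range(90, 108))   # Medium-Range (distance 18)
    print(classify_dependency_range(79, 110))   # Long-Range (distance 31)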
infilling_python
Credit_Scoring_Fairness
108
108
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN']
['PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]']
["print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / 
(confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', 
"test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', 
"x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + 
confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = 
FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] 
= y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_married', used at line 108, is defined at line 90 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 1}
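The corpus script inside these records computes accuracy and the fairness quantities by positional indexing into a 2x2 confusion matrix. For reference, scikit-learn's confusion_matrix puts true labels on rows and predictions on columns with labels sorted ascending, so for a 0/1 target cm[0, 0] is the true-negative count and cm[1, 1] the true-positive count; cm[0][0] + cm[1][1] divided by the total is therefore ordinary accuracy, and the bare sum the script comments as 'positive rate = TP + TN' is a raw count of correct predictions rather than a rate. A short self-contained check of that layout:

    import numpy as np
    from sklearn.metrics import confusion_matrix

    y_true = np.array([0, 0, 1, 1, 1])
    y_pred = np.array([0, 1, 1, 1, 0])

    # ravel() on the 2x2 matrix yields tn, fp, fn, tp in that order.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    print(tn, fp, fn, tp)                       # 1 1 1 2

    accuracy = (tn + tp) / (tn + fp + fn + tp)
    print(accuracy)                             # 0.6, i.e. cm[0][0] + cm[1][1] over the total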
infilling_python
Credit_Scoring_Fairness
110
110
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)"]
['PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]']
["print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', 
accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 
"confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', 
inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married 
= x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 
'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = 
confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_single' used at line 110 is defined at line 79 and has a Long-Range dependency.
{}
{'Variable Long-Range': 1}
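Note on the metric formulas these Credit_Scoring_Fairness records compute from 2x2 confusion matrices. Under scikit-learn's confusion_matrix convention (rows are true labels, columns are predictions, so cm[0,0] is TN and cm[1,1] is TP), the quantity the records label 'positive rate = TP + TN' is the raw count of correct predictions rather than a rate, and the records' TPR/FNR formulas index cm[0,0] where the textbook definitions would use cm[1,1]. A minimal sketch of the textbook forms of the three fairness criteria, for comparison; fairness_stats is an illustrative helper name, not something defined in the records.

from sklearn.metrics import confusion_matrix

def fairness_stats(y_true, y_pred):
    # scikit-learn layout for labels {0, 1}:
    # cm[0,0]=TN, cm[0,1]=FP, cm[1,0]=FN, cm[1,1]=TP
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    total = tn + fp + fn + tp
    return {
        'positive_rate': (tp + fp) / total,                  # demographic parity
        'TPR': tp / (tp + fn) if tp + fn else float('nan'),  # equalized opportunity
        'FNR': fn / (fn + tp) if fn + tp else float('nan'),  # with TPR: equalized odds
    }

Comparing this dict across the married, single and divorced subsets gives the same group comparison the records print, but as rates in [0, 1] instead of raw counts. Separately, the Linear Regression block that closes each record compares continuous predictions to binary labels with ==, which essentially never matches; a threshold such as prediction >= 0.5 would be needed before that comparison.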
infilling_python
Credit_Scoring_Fairness
112
112
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)"]
['PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]']
["print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, 
y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 
'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", 
"x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = 
x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', 
"print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', 
confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_divorced' used at line 112 is defined at line 101 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 1}
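Each record ends with a horizon annotation that classifies a variable use by its line distance from the definition: the 11-line gap in this record (line 112 vs. line 101) and the 27-line gap in the next record are Medium-Range, while the 31-line gap in the record above is Long-Range. A hypothetical classifier consistent with those examples; the cutoff values are assumptions inferred from the gaps visible in this file, not documented thresholds.

def classify_dependency(def_line: int, use_line: int) -> str:
    # Assumed cutoffs (inferred, not documented): gaps of 11 and 27 lines
    # appear as Medium-Range in these records, a gap of 31 lines as Long-Range.
    gap = use_line - def_line
    if gap <= 10:
        return 'Variable Short-Range'
    if gap <= 30:
        return 'Variable Medium-Range'
    return 'Variable Long-Range'

For example, classify_dependency(101, 112) returns 'Variable Medium-Range', matching the annotation above.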
infilling_python
Credit_Scoring_Fairness
117
117
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN']
['TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])']
["print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = 
confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', 
accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", 
"x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, 
index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 
'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_married' used at line 117 is defined at line 90 and has a Medium-Range dependency.
{}
{'Variable Medium-Range': 1}
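The 'Fairness Through Unawareness' blocks (spelled 'Unwareness' in the records' comments) repeat one recipe in every record: copy the feature matrix, drop the three one-hot marital-status columns from the training set and from each marital test subset, refit the model, and recompute the per-group confusion matrices. A condensed sketch of that recipe under the records' column names; the helper name is illustrative, and dropping columns on a fresh copy via drop(columns=...) stands in for the records' repeated inplace drops on slices, which is what triggers the SettingWithCopyWarning they silence at import time.

MARITAL = ['marital_married', 'marital_single', 'marital_divorced']

def unawareness_accuracy(model, x_train, y_train, x_test, y_test):
    # Fit without the sensitive columns, then score each marital subgroup.
    model.fit(x_train.drop(columns=MARITAL), y_train)
    scores = {}
    for col in MARITAL:
        group = x_test[x_test[col] == 1]               # rows in this subgroup
        pred = model.predict(group.drop(columns=MARITAL))
        scores[col] = (pred == y_test.loc[group.index]).mean()
    return scores

Called once per model (GaussianNB, RandomForestClassifier, SVC), this yields the same accuracy_*_single/married/divorced figures the records derive from their confusion matrices, without mutating any shared DataFrame.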
infilling_python
Credit_Scoring_Fairness
119
119
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)"]
['TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])']
["print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + 
confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', 
"test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = 
rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', 
confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", 
"x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = 
linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_single' used at line 119 is defined at line 79 and has a Long-Range dependency.
{}
{'Variable Long-Range': 1}
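Note on the record above: the masked 'between' line can only be reconstructed by recalling confusion_matrix_single, bound forty lines earlier in the 'before' context (line 79 vs. line 119), which is exactly what the 'Variable Long-Range' horizon category records. One caveat worth flagging for readers of these payloads: with scikit-learn's default label ordering, confusion_matrix returns [[TN, FP], [FN, TP]], so the cm[0,0] cell that the record's TPR/FNR expressions treat as TP is actually the true-negative count. A minimal sketch of the layout (the toy labels are hypothetical, not drawn from the bank dataset):

import numpy as np
from sklearn.metrics import confusion_matrix

# Toy binary labels: 0 = no subscription, 1 = subscription.
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 0])
y_pred = np.array([0, 1, 0, 1, 0, 1, 1, 0])

# scikit-learn sorts the labels, so the binary matrix is [[TN, FP], [FN, TP]].
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()

tpr = tp / (tp + fn)   # true positive rate (the equalized-opportunity quantity)
fnr = fn / (fn + tp)   # false negative rate = 1 - TPR
print('TN FP FN TP:', tn, fp, fn, tp, '| TPR:', tpr, '| FNR:', fnr)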
infilling_python
Credit_Scoring_Fairness
127
127
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP']
['FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])']
["print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", 
"test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + 
confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = 
(confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = 
test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop 
the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_married' used at line 127 is defined at line 90 and has a Long-Range dependency.
{}
{'Variable Long-Range': 1}
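The two records above (and the truncated one that follows) replay the same per-group evaluation over the married/single/divorced one-hot columns for each of the Naive Bayes, random-forest, and SVM models. A minimal sketch of that loop factored into one helper, under stated assumptions: the test frame carries a binary 'biY' target, a 'pred' column, and marital_* indicator columns as in the records; group_metrics is a hypothetical name, and its demographic-parity figure is the predicted-positive rate rather than the raw TP + TN count the records print:

import pandas as pd
from sklearn.metrics import confusion_matrix

def group_metrics(test_df: pd.DataFrame, group_col: str) -> dict:
    # Restrict to rows where the one-hot group indicator is set.
    sub = test_df[test_df[group_col] == 1]
    tn, fp, fn, tp = confusion_matrix(sub['biY'], sub['pred'], labels=[0, 1]).ravel()
    n = max(tn + fp + fn + tp, 1)        # guard against an empty group
    return {
        'positive_rate': (tp + fp) / n,  # demographic parity
        'tpr': tp / max(tp + fn, 1),     # equalized opportunity
        'fnr': fn / max(fn + tp, 1),     # equalized odds (FNR side)
    }

# Hypothetical usage once test_df holds 'biY' and 'pred':
# for col in ('marital_married', 'marital_single', 'marital_divorced'):
#     print(col, group_metrics(test_df, col))

The records' fairness-through-unawareness blocks can likewise be collapsed: pandas' drop accepts a list of column names, so X.drop(columns=['marital_married', 'marital_single', 'marital_divorced']) replaces the three separate inplace=True calls.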
infilling_python
Credit_Scoring_Fairness
129
129
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)"]
between: ['FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])']
["print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic 
Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', 
accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# 
Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
reason_categories_output: []
horizon_categories_output: Variable 'confusion_matrix_single' used at line 129 is defined at line 79 and has a Long-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Variable Long-Range': 1}
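Note: the between span in the row above is the infill target, a per-subgroup false-negative rate. Below is a minimal, self-contained sketch of that computation, assuming scikit-learn's confusion_matrix layout (rows are true labels, columns are predictions, so for binary labels the raveled order is tn, fp, fn, tp); the y_true/y_pred values are hypothetical, not drawn from the bank data. The row's code divides cm[1,0] by (cm[1,0] + cm[0,0]), i.e. FN/(FN+TN), even though its comment states FNR = FN/(FN+TP); both variants are shown.

import numpy as np
from sklearn.metrics import confusion_matrix

# Hypothetical subgroup labels and predictions (illustrative only).
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0, 0, 0])

cm = confusion_matrix(y_true, y_pred)   # rows = true label, cols = predicted label
tn, fp, fn, tp = cm.ravel()             # scikit-learn's documented binary layout

fnr_textbook = fn / (fn + tp)                      # FNR = FN / (FN + TP)
fnr_as_written = cm[1, 0] / (cm[1, 0] + cm[0, 0])  # FN / (FN + TN), as in the row's code
print(fnr_textbook, fnr_as_written)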
task_type: infilling_python
code_task: Credit_Scoring_Fairness
start_line: 131
end_line: 131
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)"]
between: ['FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])']
["print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / 
(confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = 
confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = 
x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + 
confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", 
"x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
reason_categories_output: []
horizon_categories_output: Variable 'confusion_matrix_divorced' used at line 131 is defined at line 101 and has a Medium-Range dependency.
reason_freq_analysis: {}
horizon_freq_analysis: {'Variable Medium-Range': 1}
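The horizon_categories_output field classifies each dependency by the line distance between a variable's definition and its use (line 131 minus line 101 here). A minimal sketch of that bookkeeping follows; the bucket cutoffs are assumptions for illustration (the two rows above only establish that a 30-line gap counts as Medium-Range and a 50-line gap as Long-Range), not the dataset's documented thresholds.

def classify_dependency(def_line: int, use_line: int) -> str:
    # Cutoffs are assumed for illustration; only the examples above
    # constrain them (30 lines -> Medium-Range, 50 lines -> Long-Range).
    distance = use_line - def_line
    if distance <= 10:
        return "Short-Range"
    if distance <= 30:
        return "Medium-Range"
    return "Long-Range"

# Row above: 'confusion_matrix_divorced' defined at line 101, used at line 131.
print(classify_dependency(101, 131))  # Medium-Range under these assumed cutoffs
# Previous row: 'confusion_matrix_single' defined at line 79, used at line 129.
print(classify_dependency(79, 129))   # Long-Range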
task_type: infilling_python
code_task: Credit_Scoring_Fairness
start_line: 150
end_line: 152
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single']
["x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)"]
['# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 
'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = 
confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + 
confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = 
confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, 
y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'x_test_single' used at line 150 is defined at line 143 and has a Short-Range dependency. Variable 'x_test_single' used at line 151 is defined at line 150 and has a Short-Range dependency. Variable 'x_test_single' used at line 152 is defined at line 151 and has a Short-Range dependency.
{}
{'Variable Short-Range': 3}
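A note on the fairness blocks recorded in these rows: they derive demographic parity, equalized opportunity, and equalized odds directly from confusion-matrix cells, with comments like 'TPR = TP/TP+FN'. Two caveats for anyone reusing the recorded code. First, sklearn's `confusion_matrix` lays a binary problem out as `[[TN, FP], [FN, TP]]` (rows are true labels, columns are predictions), while the recorded code indexes `[0,0]` as if it were TP. Second, the recorded "positive rate" is the raw count TP + TN, not a rate normalized by group size; demographic parity conventionally compares the predicted-positive rate (TP + FP) / total across groups. A minimal sketch of the conventional computations under sklearn's layout; `fairness_metrics` is an illustrative helper, not code from the dataset:

```python
from sklearn.metrics import confusion_matrix

def fairness_metrics(y_true, y_pred):
    # sklearn's binary layout: [[TN, FP], [FN, TP]] (rows = true labels).
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    total = tn + fp + fn + tp
    return {
        'positive_rate': (tp + fp) / total,                    # demographic parity compares this across groups
        'tpr': tp / (tp + fn) if (tp + fn) else float('nan'),  # equalized opportunity
        'fnr': fn / (fn + tp) if (fn + tp) else float('nan'),  # with FPR below, equalized odds
        'fpr': fp / (fp + tn) if (fp + tn) else float('nan'),
        'accuracy': (tp + tn) / total,
    }

# e.g., mirroring the variables in the records above (names assumed from the dump):
# fairness_metrics(y_test.loc[test_married.index], gnb_pred_married)
```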
infilling_python
Credit_Scoring_Fairness
154
156
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced']
["x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)"]
['', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 
"confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support 
Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# 
Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 
"confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'x_test_divorced' used at line 154 is defined at line 144 and has a Short-Range dependency. Variable 'x_test_divorced' used at line 155 is defined at line 154 and has a Short-Range dependency. Variable 'x_test_divorced' used at line 156 is defined at line 155 and has a Short-Range dependency.
{}
{'Variable Short-Range': 3}
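The `before` and `after` fields in these rows repeat one fit/predict/confusion-matrix block per model and per marital-status subset, refitting the estimator before every subset prediction even though the training data never changes; note also that the Random Forest divorced-subset block stores its result as `accuracy_naive_bayes_divorced`, an apparent copy-paste slip. A hedged sketch of how that loop could be factored into a single helper: `evaluate_by_group` and `MARITAL_COLS` are hypothetical names chosen to mirror the recorded variables, and the explicit `y_test.loc[subset.index]` makes visible the index alignment that the recorded `test_df['biY'] = y_test` relies on.

```python
from sklearn.metrics import confusion_matrix

# Dummy-encoded marital columns, as produced by pd.get_dummies in the records above.
MARITAL_COLS = ['marital_married', 'marital_single', 'marital_divorced']

def evaluate_by_group(model, x_train, y_train, x_test, y_test, drop_sensitive=False):
    """Fit once, then report a confusion matrix and accuracy per marital-status subset."""
    x_tr = x_train.drop(columns=MARITAL_COLS) if drop_sensitive else x_train
    model.fit(x_tr, y_train)  # a single fit covers every subset
    results = {}
    for col in MARITAL_COLS:
        subset = x_test[x_test[col] == 1]
        x_sub = subset.drop(columns=MARITAL_COLS) if drop_sensitive else subset
        pred = model.predict(x_sub)
        cm = confusion_matrix(y_test.loc[subset.index], pred, labels=[0, 1])
        results[col] = {'confusion_matrix': cm,
                        'accuracy': cm.trace() / cm.sum()}  # (TP + TN) / total
    return results

# drop_sensitive=True reproduces the 'fairness through unawareness' variant,
# removing the marital dummies from both the training and the subset features.
```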
infilling_python
Credit_Scoring_Fairness
159
165
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model']
['gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', 
'# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', '# Demographic Parity', '# positive rate = (TP + FP) / N; the counts below sum the diagonal (TP + TN) instead', 'PR_rf_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", 
"x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine: class_weight='balanced' avoids classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced')", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", 
"print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = (TP + FP) / N; the counts below sum the diagonal (TP + TN) instead', 'PR_svm_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_svm_divorced: ', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / 
(confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# 
Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 159 is defined at line 50 and has a Long-Range dependency. Variable 'x_train_unawareness' used at line 159 is defined at line 135 and has a Medium-Range dependency. Variable 'y_train' used at line 159 is defined at line 39 and has a Long-Range dependency. Variable 'x_test_single' used at line 159 is defined at line 152 and has a Short-Range dependency. Variable 'x_test_single' used at line 160 is defined at line 152 and has a Short-Range dependency. Variable 'test_df' used at line 161 is defined at line 160 and has a Short-Range dependency. Variable 'y_test' used at line 161 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 162 is defined at line 161 and has a Short-Range dependency. Library 'pd' used at line 162 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred_single' used at line 162 is defined at line 159 and has a Short-Range dependency. Variable 'test_df' used at line 163 is defined at line 162 and has a Short-Range dependency. Library 'confusion_matrix' used at line 165 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 165 is defined at line 163 and has a Short-Range dependency.
{}
{'Variable Long-Range': 3, 'Variable Medium-Range': 1, 'Variable Short-Range': 7, 'Library Long-Range': 2}
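A note on the metric formulas embedded in these records: sklearn's confusion_matrix places true labels on rows and predictions on columns, so for binary labels cm = [[TN, FP], [FN, TP]]; the PR/TPR/FNR expressions above index cm[0,0] as if it were TP, and demographic parity is usually compared as a rate rather than a raw count. A minimal sketch of the intended metrics under that convention; the toy labels are assumptions for illustration, not taken from the dataset:

import numpy as np
from sklearn.metrics import confusion_matrix

# toy labels and predictions, made up for this sketch
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0, 1, 1])

cm = confusion_matrix(y_true, y_pred)  # rows: true label, columns: prediction
tn, fp, fn, tp = cm.ravel()            # unpacks [[TN, FP], [FN, TP]]

accuracy = (tp + tn) / cm.sum()        # same diagonal-over-total formula the records use
positive_rate = (tp + fp) / cm.sum()   # share predicted positive; demographic parity compares this across groups
tpr = tp / (tp + fn)                   # equalized opportunity compares TPR across groups
fnr = fn / (fn + tp)                   # equalized odds also compares FNR (= 1 - TPR)

print(accuracy, positive_rate, tpr, fnr)

Comparing positive_rate, tpr, and fnr across the married, single, and divorced subsets would give group-normalized versions of the PR_*, TPR_*, and FNR_* quantities the records print.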
infilling_python
Credit_Scoring_Fairness
171
176
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = (TP + FP) / N; the counts below sum the diagonal (TP + TN) instead', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ', PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model']
['gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with 
Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', '# Demographic Parity', '# positive rate = (TP + FP) / N; the counts below sum the diagonal (TP + TN) instead', 'PR_rf_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", 
"x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine: class_weight='balanced' avoids classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced')", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 
"confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = (TP + FP) / N; the counts below sum the diagonal (TP + TN) instead', 'PR_svm_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_svm_divorced: ', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear 
Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 171 is defined at line 50 and has a Long-Range dependency. Variable 'x_train_unawareness' used at line 171 is defined at line 138 and has a Long-Range dependency. Variable 'y_train' used at line 171 is defined at line 39 and has a Long-Range dependency. Variable 'x_test_married' used at line 171 is defined at line 148 and has a Medium-Range dependency. Variable 'x_test_married' used at line 172 is defined at line 148 and has a Medium-Range dependency. Variable 'test_df' used at line 173 is defined at line 172 and has a Short-Range dependency. Variable 'y_test' used at line 173 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 174 is defined at line 173 and has a Short-Range dependency. Library 'pd' used at line 174 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred_married' used at line 174 is defined at line 171 and has a Short-Range dependency. Variable 'test_df' used at line 175 is defined at line 174 and has a Short-Range dependency. Library 'confusion_matrix' used at line 176 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 176 is defined at line 175 and has a Short-Range dependency.
{}
{'Variable Long-Range': 4, 'Variable Medium-Range': 2, 'Variable Short-Range': 5, 'Library Long-Range': 2}
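The fairness-through-unawareness blocks in these records drop the three marital-status dummies with repeated inplace drop calls on slices of x_test, which is exactly the pattern that raises the SettingWithCopyWarning the scripts silence at import time. A sketch of the same preprocessing as one non-mutating step; the helper name drop_marital and the demo frame are assumptions for illustration, not part of the records:

import pandas as pd

MARITAL_COLS = ['marital_married', 'marital_single', 'marital_divorced']

def drop_marital(df: pd.DataFrame) -> pd.DataFrame:
    # drop(columns=...) returns a new frame, so no slice is mutated in place
    # and no SettingWithCopyWarning needs to be suppressed
    return df.drop(columns=MARITAL_COLS)

# demo frame standing in for x_test; values are made up
demo = pd.DataFrame({'balance': [100, 250, 80],
                     'marital_married': [1, 0, 0],
                     'marital_single': [0, 1, 0],
                     'marital_divorced': [0, 0, 1]})
print(drop_marital(demo[demo['marital_single'] == 1]))

In the records, each trio of x_test_*.drop('marital_*', inplace=True, axis=1) calls would collapse to a single assignment such as x_test_single = drop_marital(x_test_unawareness[x_test_unawareness['marital_single'] == 1]).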
infilling_python
Credit_Scoring_Fairness
178
178
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = (TP + FP) / N; the counts below sum the diagonal (TP + TN) instead', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ', PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/(TP+FN)', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/(TP+FN)', '# FNR = FN/(FN+TP)', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)"]
['accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])']
["print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 
"confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = 
pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# 
x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, 
index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_married' used at line 178 is defined at line 176 and has a Short-Range dependency.
{}
{'Variable Short-Range': 1}
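The listings serialized in these records derive all fairness metrics from raw confusion-matrix cells, and their inline comments ('positive rate = TP + TN', 'TPR = TP/TP+FN') do not match the standard definitions: sklearn's confusion_matrix returns [[TN, FP], [FN, TP]] for binary 0/1 labels, so cell [0,0] is TN rather than TP, the positive rate is (TP + FP) / N, and TPR is TP / (TP + FN). Below is a minimal sketch of the conventional computation, assuming sklearn's layout; the helper name fairness_metrics is illustrative and not part of the dataset.

from sklearn.metrics import confusion_matrix

def fairness_metrics(y_true, y_pred):
    # sklearn convention: rows are true labels, columns are predictions,
    # so ravel() on a binary matrix yields TN, FP, FN, TP in that order.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    n = tn + fp + fn + tp
    return {
        'positive_rate': (tp + fp) / n,                        # demographic parity
        'TPR': tp / (tp + fn) if (tp + fn) else float('nan'),  # equalized opportunity
        'FNR': fn / (fn + tp) if (fn + tp) else float('nan'),  # with TPR: equalized odds
    }

# Usage: compute the dict once per marital-status subset and compare values.
print(fairness_metrics([0, 1, 1, 0], [0, 1, 0, 0]))

Under this reading, demographic parity amounts to comparing positive_rate across the married, single, and divorced subsets, rather than summing the diagonal counts as the embedded scripts do.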
infilling_python
Credit_Scoring_Fairness
182
187
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model']
['gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# 
positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', 
accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], 
test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# 
Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'gnb' used at line 182 is defined at line 50 and has a Long-Range dependency. Variable 'x_train_unawareness' used at line 182 is defined at line 138 and has a Long-Range dependency. Variable 'y_train' used at line 182 is defined at line 39 and has a Long-Range dependency. Variable 'x_test_divorced' used at line 182 is defined at line 156 and has a Medium-Range dependency. Variable 'x_test_divorced' used at line 183 is defined at line 156 and has a Medium-Range dependency. Variable 'test_df' used at line 184 is defined at line 183 and has a Short-Range dependency. Variable 'y_test' used at line 184 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 185 is defined at line 184 and has a Short-Range dependency. Library 'pd' used at line 185 is imported at line 2 and has a Long-Range dependency. Variable 'gnb_pred_divorced' used at line 185 is defined at line 182 and has a Short-Range dependency. Variable 'test_df' used at line 186 is defined at line 185 and has a Short-Range dependency. Library 'confusion_matrix' used at line 187 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 187 is defined at line 186 and has a Short-Range dependency.
{}
{'Variable Long-Range': 4, 'Variable Medium-Range': 2, 'Variable Short-Range': 5, 'Library Long-Range': 2}
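The horizon annotations in these records label every use/definition pair as a Short-, Medium-, or Long-Range dependency according to how many lines separate the use from the definition. The records do not state the exact cutoffs; the sketch below is a hypothetical reconstruction whose thresholds (5 and 30 lines) are assumptions chosen only to be consistent with the labels shown above.

def classify_dependency(use_line: int, def_line: int,
                        short_max: int = 5, medium_max: int = 30) -> str:
    # Assumed cutoffs: within short_max lines is Short-Range, within
    # medium_max is Medium-Range, anything farther apart is Long-Range.
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

# Consistent with the annotations in the records above:
print(classify_dependency(178, 176))  # Short-Range, as annotated
print(classify_dependency(182, 156))  # Medium-Range, as annotated
print(classify_dependency(182, 50))   # Long-Range, as annotated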
infilling_python
Credit_Scoring_Fairness
194
199
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')"]
['rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = 
confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP / (TP + FN)', '# FNR = FN / (FN + TP)', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on 
divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine: class_weight='balanced' is added to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: 
', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == 
test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'rf' used at line 194 is defined at line 193 and has a Short-Range dependency. Variable 'x_train' used at line 194 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 194 is defined at line 39 and has a Long-Range dependency. Variable 'x_test' used at line 194 is defined at line 39 and has a Long-Range dependency. Variable 'x_test' used at line 195 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 196 is defined at line 195 and has a Short-Range dependency. Variable 'y_test' used at line 196 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 197 is defined at line 196 and has a Short-Range dependency. Library 'pd' used at line 197 is imported at line 2 and has a Long-Range dependency. Variable 'rf_pred' used at line 197 is defined at line 194 and has a Short-Range dependency. Variable 'test_df' used at line 197 is defined at line 196 and has a Short-Range dependency. Variable 'test_df' used at line 198 is defined at line 197 and has a Short-Range dependency. Library 'confusion_matrix' used at line 199 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 199 is defined at line 198 and has a Short-Range dependency.
{}
{'Variable Short-Range': 7, 'Variable Long-Range': 5, 'Library Long-Range': 2}
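
A note on the confusion-matrix indexing repeated through the record above: the snippets read confusion_matrix_*[0,0] as if it were the true-positive cell, but scikit-learn's confusion_matrix puts true labels on the rows and predictions on the columns, so for binary labels [0, 1] the layout is [[TN, FP], [FN, TP]] and TP sits at [1, 1]. Below is a minimal sketch, not part of the dataset, of the group metrics as the comments define them, using scikit-learn's documented ravel() unpacking; the helper name fairness_metrics is illustrative only.

from sklearn.metrics import confusion_matrix

def fairness_metrics(y_true, y_pred):
    # Unpack the binary confusion matrix in scikit-learn's documented order.
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    total = tn + fp + fn + tp
    return {
        'accuracy': (tp + tn) / total,
        # Demographic parity compares the share of positive predictions per group.
        'positive_rate': (tp + fp) / total,
        # Equalized opportunity compares TPR = TP / (TP + FN) per group.
        'TPR': tp / (tp + fn) if (tp + fn) else float('nan'),
        # Equalized odds additionally constrains FNR = FN / (FN + TP).
        'FNR': fn / (fn + tp) if (fn + tp) else float('nan'),
    }

Applied per marital-status slice (e.g. fairness_metrics(test_df['biY'], test_df['pred'])), this yields comparable group-level rates rather than the raw counts printed by the scripts above.
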
infilling_python
Credit_Scoring_Fairness
211
216
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate approximated here as TP + TN (count of correct predictions)', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ', PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP / (TP + FN)', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP / (TP + FN)', '# FNR = FN / (FN + TP)', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model']
['rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", 
"x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all 
data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = 
confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP / (TP + FN)', '# FNR = FN / (FN + TP)', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", 
"print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'rf' used at line 211 is defined at line 193 and has a Medium-Range dependency. Variable 'test_single' used at line 211 is defined at line 207 and has a Short-Range dependency. Variable 'x_train' used at line 211 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 211 is defined at line 39 and has a Long-Range dependency. Variable 'test_single' used at line 212 is defined at line 207 and has a Short-Range dependency. Variable 'test_df' used at line 213 is defined at line 212 and has a Short-Range dependency. Variable 'y_test' used at line 213 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 214 is defined at line 213 and has a Short-Range dependency. Library 'pd' used at line 214 is imported at line 2 and has a Long-Range dependency. Variable 'rf_pred_single' used at line 214 is defined at line 211 and has a Short-Range dependency. Variable 'test_df' used at line 215 is defined at line 214 and has a Short-Range dependency. Library 'confusion_matrix' used at line 216 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 216 is defined at line 215 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 7, 'Variable Long-Range': 3, 'Library Long-Range': 2}
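
The horizon_categories_output strings label every use/definition pair as Short-, Medium-, or Long-Range, and horizon_freq_analysis tallies those labels. The exact line-distance cut-offs are not stated in this dump; the sketch below reconstructs the labelling under assumed thresholds (at most 10 lines for Short-Range, at most 30 for Medium-Range), chosen only to be consistent with the distances visible in these records.

from collections import Counter

SHORT_MAX = 10   # assumption: the 1-3 line gaps above are labelled Short-Range
MEDIUM_MAX = 30  # assumption: the 18-line gap (used at 211, defined at 193) is Medium-Range

def classify_dependency(use_line, def_line):
    distance = use_line - def_line
    if distance <= SHORT_MAX:
        return 'Short-Range'
    if distance <= MEDIUM_MAX:
        return 'Medium-Range'
    return 'Long-Range'  # e.g. 'x_train' used at line 211, defined at line 39

# Re-deriving the horizon_freq_analysis dict of the record above from its
# (kind, use line, definition line) pairs:
deps = [('Variable', 211, 193), ('Variable', 211, 207), ('Variable', 211, 39),
        ('Variable', 211, 39), ('Variable', 212, 207), ('Variable', 213, 212),
        ('Variable', 213, 39), ('Variable', 214, 213), ('Library', 214, 2),
        ('Variable', 214, 211), ('Variable', 215, 214), ('Library', 216, 8),
        ('Variable', 216, 215)]
freq = Counter(f'{kind} {classify_dependency(use, defn)}' for kind, use, defn in deps)
print(dict(freq))  # {'Variable Medium-Range': 1, 'Variable Short-Range': 7, 'Variable Long-Range': 3, 'Library Long-Range': 2}
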
infilling_python
Credit_Scoring_Fairness
264
264
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate approximated here as TP + TN (count of correct predictions)', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] + confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] + confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ', PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP / (TP + FN)', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP / (TP + FN)', '# FNR = FN / (FN + TP)', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unawareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP']
['FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])']
["print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 
"confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', 
accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'confusion_matrix_married' used at line 264 is defined at line 227 and has a Long-Range dependency.
{}
{'Variable Long-Range': 1}
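Note on the fairness metrics that recur in the Credit_Scoring_Fairness rows above: the script derives demographic parity, equalized opportunity, and equalized odds from raw confusion-matrix cells, treating cell [0,0] as TP and reporting "positive rate" as the count TP + TN. Under sklearn's convention (rows are true labels, columns are predictions) cell [0,0] is TN and TP is cell [1,1], and demographic parity is usually compared via the selection rate (TP + FP) / total rather than a raw count. A minimal sketch of the three metrics under sklearn's convention; the function name and the toy arrays are illustrative, not taken from the rows:

import numpy as np
from sklearn.metrics import confusion_matrix

def fairness_metrics(y_true, y_pred):
    # sklearn layout for binary labels {0, 1}: [[TN, FP], [FN, TP]]
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    return {
        'selection_rate': (tp + fp) / (tn + fp + fn + tp),  # demographic parity compares this
        'TPR': tp / (tp + fn),                              # equalized opportunity compares TPR
        'FNR': fn / (fn + tp),                              # error rate used in equalized odds
    }

# Illustrative only: compute the metrics for one subgroup's labels/predictions.
y_true = np.array([0, 1, 1, 0, 1, 0, 1, 1])
y_pred = np.array([0, 1, 0, 0, 1, 1, 1, 0])
print(fairness_metrics(y_true, y_pred))

Comparing these per-group values (married vs. single vs. divorced, as the rows do) is what the parity checks amount to; since FNR = 1 - TPR, equalized odds is often checked via TPR and FPR instead.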
infilling_python
Credit_Scoring_Fairness
329
329
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0"]
["svm = svm_model.SVC(class_weight='balanced') "]
['svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + 
confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 
'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Library 'svm_model' used at line 329 is imported at line 10 and has a Long-Range dependency.
{}
{'Library Long-Range': 1}
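The record above targets the single line that constructs the SVM, and its annotation tracks the long-range dependency on the svm_model import alias. The comment preserved in the rows gives the motivation: without class weighting, the imbalanced bank data pushes the classifier toward predicting everything as 0. In sklearn, class_weight='balanced' scales each class's penalty by n_samples / (n_classes * np.bincount(y)). A small self-contained sketch on toy imbalanced data, keeping the document's svm_model alias; the toy arrays are illustrative, not the bank dataset:

import numpy as np
from sklearn import svm as svm_model

rng = np.random.default_rng(1234)
X = rng.normal(size=(200, 3))             # toy features
y = (rng.random(200) > 0.9).astype(int)   # ~10% positives, a skewed target

# 'balanced' weights each class by n_samples / (n_classes * count(class)),
# so errors on the rare class cost proportionally more during fitting.
clf = svm_model.SVC(class_weight='balanced')
clf.fit(X, y)
# On the training data the rare class typically reappears in the predictions
# instead of the model collapsing to all zeros.
print(np.bincount(clf.predict(X), minlength=2))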
infilling_python
Credit_Scoring_Fairness
330
336
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') "]
['svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / 
(confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# 
Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'svm' used at line 330 is defined at line 329 and has a Short-Range dependency. Variable 'x_train' used at line 330 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 330 is defined at line 39 and has a Long-Range dependency. Variable 'x_test' used at line 330 is defined at line 39 and has a Long-Range dependency. Variable 'x_test' used at line 332 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 333 is defined at line 332 and has a Short-Range dependency. Variable 'y_test' used at line 333 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 334 is defined at line 333 and has a Short-Range dependency. Library 'pd' used at line 334 is imported at line 2 and has a Long-Range dependency. Variable 'svm_pred' used at line 334 is defined at line 330 and has a Short-Range dependency. Variable 'test_df' used at line 335 is defined at line 334 and has a Short-Range dependency. Library 'confusion_matrix' used at line 336 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 336 is defined at line 335 and has a Short-Range dependency.
{}
{'Variable Short-Range': 6, 'Variable Long-Range': 5, 'Library Long-Range': 2}
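The record above, like the others in this table, expands accuracy, "positive rate", TPR and FNR directly from confusion-matrix indices. As a compact reference, here is a minimal, self-contained sketch of those metrics under sklearn's documented convention (rows are true labels, columns are predictions, so with `labels=[0, 1]` the entries unpack as `tn, fp, fn, tp`). The label arrays are illustrative stand-ins, not data from this dataset. Two things the sketch makes explicit: `cm[0,0] + cm[1,1]` is the count of correct predictions (TN + TP), not a rate, so the Demographic Parity blocks in the snippets report counts; and the index-based TPR/FNR pair in the snippets shares a denominator and sums to one, so the Equalized Odds block carries the same information as the Equalized Opportunity block.

```python
import numpy as np
from sklearn.metrics import confusion_matrix

# Illustrative labels only; 1 is taken as the positive class.
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 1, 0, 1, 0, 1, 0])

# sklearn convention: rows = true labels, columns = predicted labels.
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()

accuracy = (tp + tn) / (tp + tn + fp + fn)  # the four-term formula repeated in the records
tpr = tp / (tp + fn)                        # true positive rate (equalized opportunity)
fnr = fn / (fn + tp)                        # false negative rate; always 1 - tpr
pos_rate = (tp + fp) / (tp + tn + fp + fn)  # share predicted positive (demographic parity)

print(accuracy, tpr, fnr, pos_rate)
```

Note that an expression like `confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])` in the snippets equals TP/(TP+FN) only if the positive class is listed first; under sklearn's default ascending label order it computes TN/(TN+FN) instead.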
infilling_python
Credit_Scoring_Fairness
350
355
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM']
['svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", 
"x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = 
linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'svm' used at line 350 is defined at line 329 and has a Medium-Range dependency. Variable 'test_single' used at line 350 is defined at line 346 and has a Short-Range dependency. Variable 'x_train' used at line 350 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 350 is defined at line 39 and has a Long-Range dependency. Variable 'test_single' used at line 351 is defined at line 346 and has a Short-Range dependency. Variable 'test_df' used at line 352 is defined at line 351 and has a Short-Range dependency. Variable 'y_test' used at line 352 is defined at line 39 and has a Long-Range dependency. Variable 'test_df' used at line 353 is defined at line 352 and has a Short-Range dependency. Library 'pd' used at line 353 is imported at line 2 and has a Long-Range dependency. Variable 'svm_pred_single' used at line 353 is defined at line 350 and has a Short-Range dependency. Variable 'test_df' used at line 354 is defined at line 353 and has a Short-Range dependency. Library 'confusion_matrix' used at line 355 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 355 is defined at line 354 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 1, 'Variable Short-Range': 7, 'Variable Long-Range': 3, 'Library Long-Range': 2}
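Each model block in these records follows the same template: fit on the full training matrix, slice the test matrix by the one-hot marital columns, score each slice, then repeat with the marital columns dropped for the "fairness through unawareness" comparison. Below is a minimal sketch of that template on synthetic data; the column names, classifier settings and data are illustrative assumptions, not values taken from bankfullclean.csv.

```python
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix

rng = np.random.default_rng(0)
n = 200
# Synthetic stand-ins for the one-hot marital columns used in the records.
marital = rng.choice(['married', 'single', 'divorced'], size=n)
X = pd.DataFrame({
    'balance': rng.normal(size=n),
    'duration': rng.normal(size=n),
    'marital_married': (marital == 'married').astype(int),
    'marital_single': (marital == 'single').astype(int),
    'marital_divorced': (marital == 'divorced').astype(int),
})
y = pd.Series((rng.random(n) < 0.3).astype(int))

protected = ['marital_married', 'marital_single', 'marital_divorced']

# "Fairness through unawareness": the model never sees the protected columns...
clf = RandomForestClassifier(max_depth=2, random_state=0, class_weight='balanced')
clf.fit(X.drop(columns=protected), y)

# ...but they are still used to slice the evaluation set per group.
for group in protected:
    mask = X[group] == 1
    pred = clf.predict(X.loc[mask].drop(columns=protected))
    cm = confusion_matrix(y[mask], pred, labels=[0, 1])
    acc = (cm[0, 0] + cm[1, 1]) / cm.sum()
    print(group, 'accuracy:', round(acc, 3))
```

Two incidental observations on the template as recorded: calling `rf.fit(x_train, y_train).predict(subset)` once per subset refits an identical model each time (the fit is deterministic for a fixed `random_state`), so fitting once and predicting per slice, as above, yields the same results; and assigning the full `y_test` into each per-group `test_df` works only because pandas index alignment silently keeps the rows belonging to that group.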
infilling_python
Credit_Scoring_Fairness
459
464
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', 
"print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', 
"x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM']
['svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])"]
["print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model', 'linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
[]
Variable 'svm' used at line 459 is defined at line 329 and has a Long-Range dependency. Variable 'x_train_unawareness' used at line 459 is defined at line 415 and has a Long-Range dependency. Variable 'y_train' used at line 459 is defined at line 39 and has a Long-Range dependency. Variable 'x_test_divorced' used at line 459 is defined at line 434 and has a Medium-Range dependency. Variable 'x_test_divorced' used at line 460 is defined at line 434 and has a Medium-Range dependency. Variable 'test_df' used at line 461 is defined at line 460 and has a Short-Range dependency. Variable 'test_df' used at line 462 is defined at line 461 and has a Short-Range dependency. Library 'pd' used at line 462 is imported at line 2 and has a Long-Range dependency. Variable 'svm_pred_divorced' used at line 462 is defined at line 459 and has a Short-Range dependency. Variable 'test_df' used at line 463 is defined at line 462 and has a Short-Range dependency. Library 'confusion_matrix' used at line 464 is imported at line 8 and has a Long-Range dependency. Variable 'test_df' used at line 464 is defined at line 463 and has a Short-Range dependency.
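The horizon labels above bucket each def-use pair by the distance between its line numbers. A minimal sketch of that bucketing, assuming illustrative cut-offs of 10 and 50 lines; the exact thresholds are not stated in these annotations:

def dependency_range(def_line, use_line, short_max=10, medium_max=50):
    # Cut-offs are assumptions consistent with the examples above,
    # e.g. 459 - 434 = 25 -> Medium-Range, 459 - 329 = 130 -> Long-Range.
    distance = abs(use_line - def_line)
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

print(dependency_range(460, 461))  # Short-Range
print(dependency_range(434, 459))  # Medium-Range
print(dependency_range(329, 459))  # Long-Range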
{}
{'Variable Long-Range': 3, 'Variable Medium-Range': 2, 'Variable Short-Range': 5, 'Library Long-Range': 2}
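The demographic-parity, equalized-opportunity, and equalized-odds snippets embedded above report raw confusion-matrix cell counts and index the matrix directly. A hedged sketch of the normalized per-group rates, assuming sklearn's convention that rows are true labels and columns predictions with labels sorted ascending, so ravel() yields tn, fp, fn, tp:

from sklearn.metrics import confusion_matrix

def group_fairness_metrics(y_true, y_pred):
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
    n = tn + fp + fn + tp
    return {
        # Demographic parity compares the predicted-positive rate across groups.
        'positive_rate': (tp + fp) / n,
        # Equalized opportunity compares the true-positive rate.
        'tpr': tp / (tp + fn) if (tp + fn) else float('nan'),
        # Equalized odds also constrains error rates; note FNR = 1 - TPR.
        'fnr': fn / (fn + tp) if (fn + tp) else float('nan'),
    }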
infilling_python
Credit_Scoring_Fairness
470
472
['import os', 'import pandas as pd', 'import numpy as np', 'import matplotlib.pyplot as plt', 'import matplotlib.cm as cm', 'from sklearn.model_selection import train_test_split', 'from sklearn.naive_bayes import GaussianNB', 'from sklearn.metrics import confusion_matrix', 'from sklearn.ensemble import RandomForestClassifier', 'from sklearn import svm as svm_model', 'from sklearn import linear_model', 'import warnings', 'from pandas.core.common import SettingWithCopyWarning', '# Disable the specific warning', 'warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)', '', '# Get the directory containing this script', 'current_dir = os.path.dirname(os.path.realpath(__file__)) ', '# Construct the full path to the dataset', "BANK_PATH = os.path.join(current_dir, 'bankfullclean.csv')", '', '# Load the dataset', 'bank = pd.read_csv(BANK_PATH)', 'df = pd.DataFrame(bank)', 'df = df.sample(n=500, random_state=1234).reset_index(drop=True)', "dummy = df[['catAge', 'job', 'marital','education','balance','day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'Fmonth', 'biDefault', 'biHousing', 'biLoan']]", 'dummy = pd.get_dummies(data=dummy)', '', '', '# Machine Learning Classification Model', '# Split the dataset into training and testing sets', 'def get_naive_dataset(dataset):', ' dataset = dataset.sample(frac=1, random_state=1234).reset_index(drop=True)', ' X = dummy', " y = dataset['biY']", ' x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)', ' return (x_train, y_train), (x_test, y_test)', '', '(x_train, y_train), (x_test, y_test) = get_naive_dataset(df)', '# Print the shape of the training and testing sets', "print('x_train.shape: ', x_train.shape)", "print('x_test.shape: ', x_test.shape)", '# Print the columns of the training and testing sets', "print('x_train.columns.values: ', x_train.columns.values)", "print('y_train.values: ', y_train.values)", "print('x_test.columns.values: ', x_test.columns.values)", "print('y_test.values: ', y_test.values)", '', '# Naive Bayes Model', 'gnb = GaussianNB()', 'gnb_pred = gnb.fit(x_train, y_train).predict(x_test)', '', '# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', '', '# Confusion Matrix', "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Calculate Accuracy', 'accuracy_naive_bayes = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_naive_bayes: ', accuracy_naive_bayes)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + 
confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married subset with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced subset with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_Naive_Bayes_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_Naive_Bayes_married: ', PR_Naive_Bayes_married)", 'PR_Naive_Bayes_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_Naive_Bayes_single: ',PR_Naive_Bayes_single)", 'PR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_Naive_Bayes_divorced: ', PR_Naive_Bayes_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_Naive_Bayes_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_Naive_Bayes_married: ', TPR_Naive_Bayes_married)", 'TPR_Naive_Bayes_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_Naive_Bayes_single: ', TPR_Naive_Bayes_single)", 'TPR_Naive_Bayes_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_Naive_Bayes_divorced: ', TPR_Naive_Bayes_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_Naive_Bayes_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_Naive_Bayes_married: ', FNR_Naive_Bayes_married)", 'FNR_Naive_Bayes_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_Naive_Bayes_single: ', FNR_Naive_Bayes_single)", 'FNR_Naive_Bayes_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_Naive_Bayes_divorced: ', FNR_Naive_Bayes_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', 
"x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married,', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Naive Bayes Model', 'gnb_pred_single = gnb.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", '', "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_naive_bayes_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_naive_bayes_single: ', accuracy_naive_bayes_single)", '', '# Test on married set with Naive Bayes Model', 'gnb_pred_married = gnb.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_naive_bayes_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_naive_bayes_married: ', accuracy_naive_bayes_married)", '', '# Test on divorced set with Naive Bayes Model', 'gnb_pred_divorced = gnb.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(gnb_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', 
"print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Random Forest Model ', "rf = RandomForestClassifier(max_depth=2, random_state=1234, class_weight='balanced')", 'rf_pred = rf.fit(x_train, y_train).predict(x_test)', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '# Accuracy', 'accuracy_rf = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_rf: ', accuracy_rf)", '', '# Split dataset based on marital status', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with Random Forest Model', 'rf_pred_single = rf.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married subset with Random Forest Model', 'rf_pred_married = rf.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced subset with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_naive_bayes_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_naive_bayes_divorced: ', accuracy_naive_bayes_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_rf_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_rf_married: ', PR_rf_married)", 'PR_rf_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_rf_single: ', PR_rf_single)", 'PR_rf_divorced = confusion_matrix_divorced[0,0] 
+confusion_matrix_divorced[1,1]', "print('PR_rf_divorced: ', PR_rf_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_rf_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_rf_married: ', TPR_rf_married)", 'TPR_rf_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_rf_single: ', TPR_rf_single)", 'TPR_rf_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_rf_divorced: ', TPR_rf_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_rf_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_rf_married: ', FNR_rf_married)", 'FNR_rf_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_rf_single: ', FNR_rf_single)", 'FNR_rf_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_rf_divorced: ', FNR_rf_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '# Fairness through unawareness', 'x_test_unawareness = x_test.copy()', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', "x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with Random Forest Model', 'rf_pred_single = rf.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_rf_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_rf_single: ', accuracy_rf_single)", '', '# Test on married set with Random Forest Model', 'rf_pred_married = rf.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_married, index=test_df.index)", "test_df['accurate'] = 
(test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_rf_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_rf_married: ', accuracy_rf_married)", '', '# Test on divorced set with Random Forest Model', 'rf_pred_divorced = rf.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(rf_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_rf_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_rf_divorced: ', accuracy_rf_divorced)", '', "# Support Vector Machine add class_weight='balanced' is to avoid classifying all data as 0", "svm = svm_model.SVC(class_weight='balanced') ", 'svm_pred = svm.fit(x_train, y_train).predict(x_test)', '', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_mat = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix: ', confusion_mat)", '', '# Accuracy', 'accuracy_svm = (confusion_mat[0][0] + confusion_mat[1][1]) / (confusion_mat[0][0] + confusion_mat[0][1] + confusion_mat[1][0] + confusion_mat[1][1])', "print('accuracy_svm: ', accuracy_svm)", '', '# Split dataset based on marital status', '', "test_married = x_test[x_test['marital_married'] == 1]", "test_single = x_test[x_test['marital_single'] == 1]", "test_divorced = x_test[x_test['marital_divorced'] == 1]", '', '# Test on single subset with SVM', 'svm_pred_single = svm.fit(x_train, y_train).predict(test_single)', 'test_df = test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married subset with SVM', 'svm_pred_married = svm.fit(x_train, y_train).predict(test_married)', 'test_df = test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', 
"print('accuracy_svm_married: ', accuracy_svm_married)", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train, y_train).predict(test_divorced)', 'test_df = test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Demographic Parity', '# positive rate = TP + TN', 'PR_svm_married = confusion_matrix_married[0,0] +confusion_matrix_married[1,1]', "print('PR_svm_married: ', PR_svm_married)", 'PR_svm_single = confusion_matrix_single[0,0] +confusion_matrix_single[1,1]', "print('PR_svm_single: ', PR_svm_single)", 'PR_svm_divorced = confusion_matrix_divorced[0,0] +confusion_matrix_divorced[1,1]', "print('PR_svm_divorced:', PR_svm_divorced)", '', '# Equalized Opportunity', '# TPR = TP/TP+FN', 'TPR_svm_married = confusion_matrix_married[0,0] / (confusion_matrix_married[0,0] + confusion_matrix_married[1,0])', "print('TPR_svm_married: ', TPR_svm_married)", 'TPR_svm_single = confusion_matrix_single[0,0] / (confusion_matrix_single[0,0] + confusion_matrix_single[1,0])', "print('TPR_svm_single: ', TPR_svm_single)", 'TPR_svm_divorced = confusion_matrix_divorced[0,0] / (confusion_matrix_divorced[0,0] + confusion_matrix_divorced[1,0])', "print('TPR_svm_divorced: ', TPR_svm_divorced)", '', '# Equalized Odds', '# TPR = TP/TP+FN', '# FNR = FN/FN+TP', 'FNR_svm_married = confusion_matrix_married[1,0] / (confusion_matrix_married[1,0] + confusion_matrix_married[0,0])', "print('FNR_svm_married: ', FNR_svm_married)", 'FNR_svm_single = confusion_matrix_single[1,0] / (confusion_matrix_single[1,0] + confusion_matrix_single[0,0])', "print('FNR_svm_single: ', FNR_svm_single)", 'FNR_svm_divorced = confusion_matrix_divorced[1,0] / (confusion_matrix_divorced[1,0] + confusion_matrix_divorced[0,0])', "print('FNR_svm_divorced: ', FNR_svm_divorced)", '', '# Fairness Through Unwareness', 'x_train_unawareness = x_train.copy()', '# x_train_unawareness', "x_train_unawareness.drop('marital_married', inplace=True, axis=1)", "x_train_unawareness.drop('marital_single', inplace=True, axis=1)", "x_train_unawareness.drop('marital_divorced', inplace=True, axis=1)", '', '#Fairness through unawareness', 'x_test_unawareness = x_test.copy()', '# x_test_unawareness', "x_test_married = x_test_unawareness[x_test_unawareness['marital_married'] == 1]", "x_test_single = x_test_unawareness[x_test_unawareness['marital_single'] == 1]", "x_test_divorced = x_test_unawareness[x_test_unawareness['marital_divorced'] == 1]", '# Drop the marital status columns for x_test_married', "x_test_married.drop('marital_married', inplace=True, axis=1)", "x_test_married.drop('marital_single', inplace=True, axis=1)", "x_test_married.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_single', "x_test_single.drop('marital_married', inplace=True, axis=1)", "x_test_single.drop('marital_single', inplace=True, axis=1)", "x_test_single.drop('marital_divorced', inplace=True, axis=1)", '# Drop the marital status columns for x_test_divorced', 
"x_test_divorced.drop('marital_married', inplace=True, axis=1)", "x_test_divorced.drop('marital_single', inplace=True, axis=1)", "x_test_divorced.drop('marital_divorced', inplace=True, axis=1)", '', '# Test on single set with SVM', 'svm_pred_single = svm.fit(x_train_unawareness, y_train).predict(x_test_single)', 'test_df = x_test_single.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_single, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_single = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_single: ', confusion_matrix_single)", 'accuracy_svm_single = (confusion_matrix_single[0][0] + confusion_matrix_single[1][1]) / (confusion_matrix_single[0][0] + confusion_matrix_single[0][1] + confusion_matrix_single[1][0] + confusion_matrix_single[1][1])', "print('accuracy_svm_single: ', accuracy_svm_single)", '', '# Test on married set with SVM', 'svm_pred_married = svm.fit(x_train_unawareness, y_train).predict(x_test_married)', 'test_df = x_test_married.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_married, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_married = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_married: ', confusion_matrix_married)", 'accuracy_svm_married = (confusion_matrix_married[0][0] + confusion_matrix_married[1][1]) / (confusion_matrix_married[0][0] + confusion_matrix_married[0][1] + confusion_matrix_married[1][0] + confusion_matrix_married[1][1])', "print('accuracy_svm_married: ', accuracy_svm_married) ", '', '# Test on divorced subset with SVM', 'svm_pred_divorced = svm.fit(x_train_unawareness, y_train).predict(x_test_divorced)', 'test_df = x_test_divorced.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(svm_pred_divorced, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", "confusion_matrix_divorced = confusion_matrix(test_df['biY'], test_df['pred'])", "print('confusion_matrix_divorced: ', confusion_matrix_divorced)", 'accuracy_svm_divorced = (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[1][1]) / (confusion_matrix_divorced[0][0] + confusion_matrix_divorced[0][1] + confusion_matrix_divorced[1][0] + confusion_matrix_divorced[1][1])', "print('accuracy_svm_divorced: ', accuracy_svm_divorced)", '', '# Linear Regression model']
['linear_regression_model = linear_model.LinearRegression()', 'linear_regression_model.fit(x_train, y_train)', 'prediction = linear_regression_model.predict(x_test)']
['# Result', 'test_df = x_test.copy()', "test_df['biY'] = y_test", "test_df['pred'] = pd.Series(prediction, index=test_df.index)", "test_df['accurate'] = (test_df['pred'] == test_df['biY'])", 'print("test_df[\'pred\']: ", test_df[\'pred\'])']
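The result block above compares LinearRegression's continuous predictions to the 0/1 labels with ==, which almost never matches exactly. A minimal sketch that binarizes first; the 0.5 threshold is an illustrative choice, not taken from the source:

import numpy as np

def binarize(prediction, threshold=0.5):
    # Map continuous regression outputs to {0, 1} before scoring.
    return (np.asarray(prediction) >= threshold).astype(int)

# e.g. test_df['pred'] = binarize(prediction) before computing 'accurate'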
[]
Library 'linear_model' used at line 470 is imported at line 11 and has a Long-Range dependency. Variable 'linear_regression_model' used at line 471 is defined at line 470 and has a Short-Range dependency. Variable 'x_train' used at line 471 is defined at line 39 and has a Long-Range dependency. Variable 'y_train' used at line 471 is defined at line 39 and has a Long-Range dependency. Variable 'linear_regression_model' used at line 472 is defined at line 470 and has a Short-Range dependency. Variable 'x_test' used at line 472 is defined at line 39 and has a Long-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2, 'Variable Long-Range': 3}
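Every accuracy in the code above spells out all four cells of the 2x2 confusion matrix by hand. An equivalent, less error-prone form that also generalizes to more than two classes:

import numpy as np

def accuracy_from_confusion(cm):
    # Correct predictions lie on the diagonal; the grand total is all predictions.
    cm = np.asarray(cm)
    return np.trace(cm) / cm.sum()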
infilling_python
GAN_model
31
31
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):']
[' return tf.keras.layers.LeakyReLU(.2)(x)']
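The gap at line 31 is filled by applying a LeakyReLU layer in-line with slope 0.2. For reference, tf.nn.leaky_relu is the functional equivalent of that layer call:

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 2.0])
print(tf.keras.layers.LeakyReLU(0.2)(x).numpy())  # [-0.2  0.   2. ], as in the snippet
print(tf.nn.leaky_relu(x, alpha=0.2).numpy())     # same values, functional form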
['', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": 
self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = 
EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, 
group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x 
= top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and 
print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 31 is imported at line 16 and has a Medium-Range dependency. Variable 'x' used at line 31 is defined at line 30 and has a Short-Range dependency.
{}
{'Library Medium-Range': 1, 'Variable Short-Range': 1}
infilling_python
GAN_model
37
37
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):']
[' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))']
['', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', 
' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", 
"print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, 
avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'x' used at line 37 is defined at line 36 and has a Short-Range dependency. Library 'tf' used at line 37 is imported at line 16 and has a Medium-Range dependency.
{}
{'Variable Short-Range': 1, 'Library Medium-Range': 1}
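The gold infill for this record is the body of pixelnorm from the Progressive GAN components: each pixel's feature vector is divided by its RMS over the channel axis. The NumPy sketch below is illustrative only and not part of the record; it mirrors the infilled line, including the 10e-8 epsilon placed inside the mean, and checks that the per-pixel RMS over channels comes out near 1 afterwards.

import numpy as np

# Mirrors the infilled line: x / sqrt(mean(x**2 + 10e-8, axis=3, keepdims=True))
def pixelnorm_np(x, eps=10e-8):
    return x / np.sqrt(np.mean(x**2 + eps, axis=3, keepdims=True))

x = np.random.RandomState(42).normal(size=(1, 4, 4, 8)).astype(np.float32)
y = pixelnorm_np(x)
# Every pixel's channel vector now has RMS ~1.0 (slightly below, due to eps).
print(np.sqrt(np.mean(y**2, axis=3)))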
infilling_python
GAN_model
96
98
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters']
[' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer']
[' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' 
', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = 
U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return 
model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the 
previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 
'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 96 is defined at line 85 and has a Medium-Range dependency. Variable 'kernel_size' used at line 96 is defined at line 87 and has a Short-Range dependency. Variable 'self' used at line 97 is defined at line 85 and has a Medium-Range dependency. Variable 'strides' used at line 97 is defined at line 88 and has a Short-Range dependency. Variable 'self' used at line 98 is defined at line 85 and has a Medium-Range dependency. Variable 'kernel_initializer' used at line 98 is defined at line 89 and has a Short-Range dependency.
{}
{'Variable Medium-Range': 3, 'Variable Short-Range': 3}
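The infill for this record is the tail of EqualizedConv2D.__init__, which only stores constructor arguments on self; the arithmetic those attributes feed happens later, in build(), where the equalised-learning-rate scale sqrt(gain / (kernel_h * kernel_w * n_channels)) is computed and then applied to the kernel in call() (the bias is deliberately left unscaled). The standalone sketch below just reproduces that scale computation with the record's defaults; the 512-channel input is an example value matching the generator blocks in the context, not something fixed by the layer itself.

import math

kernel_size = (3, 3)   # EqualizedConv2D default in the record
gain = 2               # EqualizedConv2D default in the record
n_channels = 512       # example value: the GAN blocks feed 512-channel maps

fan_in = kernel_size[0] * kernel_size[1] * n_channels  # 9 * 512 = 4608
scale = math.sqrt(gain / fan_in)                       # kernel multiplier used in call()
print(fan_in, round(scale, 5))  # 4608 0.02083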
infilling_python
GAN_model
95
100
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. ', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)']
[' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain']
['', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' 
shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, 
kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 
'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and 
shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= 
tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 95 is defined at line 85 and has a Short-Range dependency. Variable 'filters' used at line 95 is defined at line 86 and has a Short-Range dependency. Variable 'self' used at line 96 is defined at line 85 and has a Medium-Range dependency. Variable 'kernel_size' used at line 96 is defined at line 87 and has a Short-Range dependency. Variable 'self' used at line 97 is defined at line 85 and has a Medium-Range dependency. Variable 'strides' used at line 97 is defined at line 88 and has a Short-Range dependency. Variable 'self' used at line 98 is defined at line 85 and has a Medium-Range dependency. Variable 'kernel_initializer' used at line 98 is defined at line 89 and has a Short-Range dependency. Variable 'self' used at line 99 is defined at line 85 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 99 is defined at line 90 and has a Short-Range dependency. Variable 'self' used at line 100 is defined at line 85 and has a Medium-Range dependency. Variable 'gain' used at line 100 is defined at line 91 and has a Short-Range dependency.
{}
{'Variable Short-Range': 7, 'Variable Medium-Range': 5}
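The horizon_categories_output prose above and the horizon_freq_analysis dict that follows it are redundant: the dict is just a tally of the sentence labels. Below is a minimal sketch of that derivation. The distance cut-offs are assumptions inferred from the examples (a 10-line gap reads as Short-Range, an 11-line gap as Medium-Range, an 89-line gap to an import as Long-Range); the dump never states the thresholds, so short_max and medium_max are hypothetical.

from collections import Counter

def classify_range(use_line, def_line, short_max=10, medium_max=30):
    # Hypothetical cut-offs; only the 10-vs-11 boundary is evidenced above.
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

# (kind, use_line, def_line) triples transcribed from the record above.
deps = [('Variable', 95, 85), ('Variable', 95, 86),
        ('Variable', 96, 85), ('Variable', 96, 87),
        ('Variable', 97, 85), ('Variable', 97, 88),
        ('Variable', 98, 85), ('Variable', 98, 89),
        ('Variable', 99, 85), ('Variable', 99, 90),
        ('Variable', 100, 85), ('Variable', 100, 91)]

freq = Counter(f"{kind} {classify_range(u, d)}" for kind, u, d in deps)
print(dict(freq))  # {'Variable Short-Range': 7, 'Variable Medium-Range': 5}

Run as-is this reproduces the dict on the line above, which supports the inferred short_max=10 boundary; the Medium/Long boundary is untested by this record and remains a guess.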
infilling_python
GAN_model
104
105
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape']
[' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)']
[' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' 
"units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 
'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' 
u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 
'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 
'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 104 is defined at line 102 and has a Short-Range dependency. Variable 'n_channels' used at line 104 is defined at line 103 and has a Short-Range dependency. Variable 'kernel_size' used at line 104 is defined at line 96 and has a Short-Range dependency. Variable 'self' used at line 105 is defined at line 102 and has a Short-Range dependency. Library 'tf' used at line 105 is imported at line 16 and has a Long-Range dependency. Variable 'fan_in' used at line 105 is defined at line 104 and has a Short-Range dependency. Variable 'gain' used at line 105 is defined at line 100 and has a Short-Range dependency.
{}
{'Variable Short-Range': 6, 'Library Long-Range': 1}
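The two infilled lines of this record (fan_in and self.scale) are the core of the equalized learning-rate trick the dump's comments describe: weights are stored unscaled and multiplied by sqrt(gain/fan_in) on every forward pass. Here is a NumPy mirror of that arithmetic — a sketch only, not the layer itself — using the defaults visible in the record (3x3 kernel, gain=2, the 3-channel test input):

import numpy as np

kernel_size = (3, 3)
n_channels = 3   # channel count of the (1, 8, 8, 3) test tensor in the dump
filters = 16
gain = 2

fan_in = kernel_size[0] * kernel_size[1] * n_channels  # 3*3*3 = 27
scale = np.sqrt(gain / fan_in)                          # ~0.2722

# The layer keeps w ~ N(0, 1) and rescales at call time, so the effective
# kernel has He-normalised variance regardless of what the optimizer sees.
w = np.random.default_rng(42).normal(size=(*kernel_size, n_channels, filters))
effective_kernel = scale * w
print(fan_in, float(scale), effective_kernel.std())

The same fan_in formula (with the kernel axes transposed) drives EqualizedConv2DTranspose in the surrounding code, while EqualizedDense drops the kernel factors and uses n_channels alone.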
infilling_python
GAN_model
108
114
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(']
[" name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)']
['', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' 
return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = 
tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 
'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = 
(image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 109 is defined at line 102 and has a Short-Range dependency. Variable 'kernel_size' used at line 109 is defined at line 96 and has a Medium-Range dependency. Variable 'n_channels' used at line 110 is defined at line 103 and has a Short-Range dependency. Variable 'self' used at line 111 is defined at line 102 and has a Short-Range dependency. Variable 'filters' used at line 111 is defined at line 95 and has a Medium-Range dependency. Variable 'self' used at line 112 is defined at line 102 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 112 is defined at line 98 and has a Medium-Range dependency. Library 'tf' used at line 114 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Short-Range': 4, 'Variable Medium-Range': 3, 'Library Long-Range': 1}
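The record above annotates source lines 109-114 of GAN_model: the kernel add_weight call inside EqualizedConv2D.build. A minimal runnable sketch of just that span, reconstructed from the record's before/after fields, with each annotated dependency marked in a comment (the class name KernelWeightSketch and the filters=16 default are illustrative stand-ins, not part of the source):

    import tensorflow as tf

    class KernelWeightSketch(tf.keras.layers.Layer):
        # Stand-in reduced to the annotated span; the real EqualizedConv2D in
        # the record also carries strides, a bias weight, and runtime scaling.
        def __init__(self, filters=16, kernel_size=(3, 3),
                     kernel_initializer=tf.initializers.RandomNormal(seed=42),
                     **kwargs):
            super().__init__(**kwargs)
            self.filters = filters                        # source line 95
            self.kernel_size = kernel_size                # source line 96
            self.kernel_initializer = kernel_initializer  # source line 98

        def build(self, input_shape):                     # source line 102 binds `self`
            *_, n_channels = input_shape                  # source line 103
            self.w = self.add_weight(
                name='kernel',
                shape=(*self.kernel_size,                 # line 109: self (def. 102, Short), kernel_size (def. 96, Medium)
                       n_channels,                        # line 110: n_channels (def. 103, Short)
                       self.filters),                     # line 111: self (Short), filters (def. 95, Medium)
                initializer=self.kernel_initializer,      # line 112: self (Short), kernel_initializer (def. 98, Medium)
                trainable=True,
                dtype=tf.float32)                         # line 114: tf (imported at line 16, Long)

    KernelWeightSketch().build((1, 8, 8, 3))              # creates a (3, 3, 3, 16) kernel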
infilling_python
GAN_model
117
121
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(']
[" name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = 
tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 
'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' 
filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 118 is defined at line 102 and has a Medium-Range dependency. Variable 'filters' used at line 118 is defined at line 95 and has a Medium-Range dependency. Variable 'self' used at line 119 is defined at line 102 and has a Medium-Range dependency. Variable 'bias_initializer' used at line 119 is defined at line 99 and has a Medium-Range dependency. Library 'tf' used at line 121 is imported at line 16 and has a Long-Range dependency.
{}
{'Variable Medium-Range': 4, 'Library Long-Range': 1}
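The horizon_freq_analysis dict above can be re-derived from the sentence annotations in the same record. A small sketch, assuming the range buckets are cut on line distance (Short <= 10 lines, Medium 11-30, Long > 30); these cutoffs are an inference from the distances seen across the records in this dump, not something the dump states:

    from collections import Counter

    def dep_range(use_line, def_line):
        # Bucket a dependency by how far the use sits below the definition.
        # Thresholds are assumed, but consistent with every record shown here.
        d = use_line - def_line
        if d <= 10:
            return 'Short-Range'
        if d <= 30:
            return 'Medium-Range'
        return 'Long-Range'

    # (kind, use_line, def_line) triples copied from the record above
    deps = [('Variable', 118, 102), ('Variable', 118, 95),
            ('Variable', 119, 102), ('Variable', 119, 99),
            ('Library', 121, 16)]
    freq = Counter(f'{kind} {dep_range(u, d)}' for kind, u, d in deps)
    print(dict(freq))   # {'Variable Medium-Range': 4, 'Library Long-Range': 1}

The printed dict matches the record's horizon_freq_analysis field.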
infilling_python
GAN_model
127
127
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,']
[' "kernel_size": self.kernel_size,']
[' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', 
"print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' 
filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', 
' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = 
downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 127 is defined at line 123 and has a Short-Range dependency. Variable 'kernel_size' used at line 127 is defined at line 96 and has a Long-Range dependency.
{}
{'Variable Short-Range': 1, 'Variable Long-Range': 1}
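This record's infill is a single line (source line 127, the "kernel_size" entry in EqualizedConv2D.get_config); the two annotated uses resolve to the get_config signature at line 123 and the kernel_size parameter at line 96, which is why one dependency is Short-Range and the other Long-Range. A reduced, runnable stand-in (ConfigSketch is a hypothetical name; only the span the record touches is kept):

    import tensorflow as tf

    class ConfigSketch(tf.keras.layers.Layer):
        def __init__(self, filters=16, kernel_size=(3, 3), **kwargs):  # kernel_size: source line 96
            super().__init__(**kwargs)
            self.filters = filters
            self.kernel_size = kernel_size

        def get_config(self):                      # source line 123 binds `self`
            config = super().get_config()
            config.update({
                "filters": self.filters,
                "kernel_size": self.kernel_size,   # line 127: self (def. 123, Short), kernel_size (def. 96, Long)
            })
            return config

    print(ConfigSketch().get_config()["kernel_size"])   # -> (3, 3)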
infilling_python
GAN_model
127
130
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,']
[' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,']
[' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d 
= EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, 
group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x 
= top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and 
print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'self' used at line 127 is defined at line 123 and has a Short-Range dependency. Variable 'kernel_size' used at line 127 is defined at line 96 and has a Long-Range dependency. Variable 'self' used at line 128 is defined at line 123 and has a Short-Range dependency. Variable 'strides' used at line 128 is defined at line 97 and has a Long-Range dependency. Variable 'self' used at line 129 is defined at line 123 and has a Short-Range dependency. Variable 'kernel_initializer' used at line 129 is defined at line 98 and has a Long-Range dependency. Variable 'self' used at line 130 is defined at line 123 and has a Short-Range dependency. Variable 'bias_initializer' used at line 130 is defined at line 99 and has a Long-Range dependency.
{}
{'Variable Short-Range': 4, 'Variable Long-Range': 4}
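A note on the Short-Range / Medium-Range / Long-Range labels in the record above: they appear to be assigned purely by the line distance between a symbol's use and its definition. The helper below is a hypothetical sketch of that rule; the cutoffs (10 and 30 lines) are inferred from the gaps observed in these records, not from any documented spec, and the function name is invented for illustration.

def classify_dependency_range(use_line: int, def_line: int) -> str:
    # Thresholds are assumptions inferred from this file's labels:
    # gaps of 1-4 lines appear as Short-Range, gaps of ~22-29 as
    # Medium-Range, and gaps of 31+ as Long-Range.
    gap = abs(use_line - def_line)
    if gap <= 10:
        return "Short-Range"
    if gap <= 30:
        return "Medium-Range"
    return "Long-Range"

print(classify_dependency_range(127, 123))  # Short-Range, matches the record above
print(classify_dependency_range(127, 96))   # Long-Range, matches the record above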
infilling_python
GAN_model
136
136
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):']
[" x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')"]
['', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', 
"print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = 
act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 
'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 136 is imported at line 16 and has a Long-Range dependency. Variable 'inputs' used at line 136 is defined at line 135 and has a Short-Range dependency. Variable 'self' used at line 136 is defined at line 135 and has a Short-Range dependency. Variable 'scale' used at line 136 is defined at line 105 and has a Long-Range dependency. Variable 'w' used at line 136 is defined at line 107 and has a Medium-Range dependency. Variable 'strides' used at line 136 is defined at line 97 and has a Long-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 2, 'Variable Long-Range': 2, 'Variable Medium-Range': 1}
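The frequency dict on the line above looks like a straight tally of the per-dependency labels listed two lines earlier. A minimal sketch of that aggregation, assuming the "<kind> <range>" key format seen in these records:

from collections import Counter

# (kind, range) pairs transcribed from the dependency line of the record above
deps = [
    ("Library", "Long-Range"),
    ("Variable", "Short-Range"),
    ("Variable", "Short-Range"),
    ("Variable", "Long-Range"),
    ("Variable", "Medium-Range"),
    ("Variable", "Long-Range"),
]
freq = dict(Counter(f"{kind} {rng}" for kind, rng in deps))
print(freq)
# {'Library Long-Range': 1, 'Variable Short-Range': 2,
#  'Variable Long-Range': 2, 'Variable Medium-Range': 1}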
infilling_python
GAN_model
136
138
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):']
[" x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b']
['', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", 
"print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' 
kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, 
avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Library 'tf' used at line 136 is imported at line 16 and has a Long-Range dependency. Variable 'inputs' used at line 136 is defined at line 135 and has a Short-Range dependency. Variable 'self' used at line 136 is defined at line 135 and has a Short-Range dependency. Variable 'scale' used at line 136 is defined at line 105 and has a Long-Range dependency. Variable 'w' used at line 136 is defined at line 107 and has a Medium-Range dependency. Variable 'strides' used at line 136 is defined at line 97 and has a Long-Range dependency. Variable 'self' used at line 138 is defined at line 135 and has a Short-Range dependency. Variable 'b' used at line 138 is defined at line 116 and has a Medium-Range dependency. Variable 'x' used at line 138 is defined at line 136 and has a Short-Range dependency.
{}
{'Library Long-Range': 1, 'Variable Short-Range': 4, 'Variable Long-Range': 2, 'Variable Medium-Range': 2}
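The 'between' span of this record completes the equalized-learning-rate forward pass of EqualizedConv2D: a convolution with dynamically scaled weights followed by an unscaled bias add. Below is a minimal standalone sketch of that same computation outside the layer class, assuming an NHWC input and an HWIO kernel; the shapes and random values are illustrative only, not taken from the dataset.

import tensorflow as tf

tf.random.set_seed(42)
x = tf.random.normal((1, 8, 8, 3))    # NHWC input
w = tf.random.normal((3, 3, 3, 16))   # HWIO kernel, as add_weight builds it
b = tf.zeros((16,))                   # bias is applied unscaled

gain = 2.0
fan_in = 3 * 3 * 3                    # kernel_h * kernel_w * n_channels
scale = tf.math.sqrt(gain / fan_in)   # ~0.272, mirrors self.scale from build()

# scaled conv + bias, the same computation as the infilled call() body
y = tf.nn.conv2d(x, filters=scale * w, strides=(1, 1), padding='SAME') + b
print(y.shape)                        # (1, 8, 8, 16)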
infilling_python
GAN_model
152
154
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ']
[' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size']
[' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' 
kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for 
layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = 
additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2DTranspose' used at line 152 is defined at line 142 and has a Short-Range dependency.
Variable 'self' used at line 152 is defined at line 143 and has a Short-Range dependency.
Variable 'self' used at line 153 is defined at line 143 and has a Short-Range dependency.
Variable 'filters' used at line 153 is defined at line 144 and has a Short-Range dependency.
Variable 'self' used at line 154 is defined at line 143 and has a Medium-Range dependency.
Variable 'kernel_size' used at line 154 is defined at line 145 and has a Short-Range dependency.
{}
{'Class Short-Range': 1, 'Variable Short-Range': 4, 'Variable Medium-Range': 1}
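The record above fills in the start of EqualizedConv2DTranspose.__init__ (source lines 152-154): the constructor only stores its arguments, and the equalized learning-rate scale sqrt(gain/fan_in) is computed later in build. To give a feel for the magnitude of that scale, here is a minimal sketch with hypothetical but representative numbers (3x3 kernels over 512 channels, gain 2, as used throughout the GAN_model records); it is an illustration, not part of the dataset:

```python
import math

# Representative values from the GAN_model records: 3x3 kernels,
# 512 input channels, and the default gain of 2.
gain = 2
kernel_h, kernel_w, n_channels = 3, 3, 512

fan_in = kernel_h * kernel_w * n_channels  # 4608
scale = math.sqrt(gain / fan_in)           # ~0.0208
print(fan_in, scale)
```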
infilling_python
GAN_model
152
158
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ']
[' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain']
['', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' 
strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', 
'#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' 
kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 
'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Class 'EqualizedConv2DTranspose' used at line 152 is defined at line 142 and has a Short-Range dependency.
Variable 'self' used at line 152 is defined at line 143 and has a Short-Range dependency.
Variable 'self' used at line 153 is defined at line 143 and has a Short-Range dependency.
Variable 'filters' used at line 153 is defined at line 144 and has a Short-Range dependency.
Variable 'self' used at line 154 is defined at line 143 and has a Medium-Range dependency.
Variable 'kernel_size' used at line 154 is defined at line 145 and has a Short-Range dependency.
Variable 'self' used at line 155 is defined at line 143 and has a Medium-Range dependency.
Variable 'strides' used at line 155 is defined at line 146 and has a Short-Range dependency.
Variable 'self' used at line 156 is defined at line 143 and has a Medium-Range dependency.
Variable 'kernel_initializer' used at line 156 is defined at line 147 and has a Short-Range dependency.
Variable 'self' used at line 157 is defined at line 143 and has a Medium-Range dependency.
Variable 'bias_initializer' used at line 157 is defined at line 148 and has a Short-Range dependency.
Variable 'self' used at line 158 is defined at line 143 and has a Medium-Range dependency.
Variable 'gain' used at line 158 is defined at line 149 and has a Short-Range dependency.
{}
{'Class Short-Range': 1, 'Variable Short-Range': 8, 'Variable Medium-Range': 5}
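This record's target span (source lines 152-158) mirrors every constructor argument onto self, which is exactly what get_config later echoes back. A minimal round-trip sketch, assuming the EqualizedConv2DTranspose class from these records is in scope (Layer.from_config simply calls the constructor with the stored config):

```python
import tensorflow as tf  # required by the layer class itself

# Sketch: every value assigned to self in __init__ must appear in
# get_config, or from_config cannot rebuild an identical layer.
layer = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))
rebuilt = EqualizedConv2DTranspose.from_config(layer.get_config())
assert rebuilt.filters == 16 and rebuilt.kernel_size == (3, 3)
```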
infilling_python
GAN_model
161
179
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):']
[' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)']
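The build body above (source lines 161-179) creates the kernel with shape (*kernel_size, filters, n_channels). The axis order is deliberate: tf.nn.conv2d_transpose expects filters laid out as [height, width, out_channels, in_channels], the reverse of the [height, width, in_channels, out_channels] layout that the plain EqualizedConv2D uses for tf.nn.conv2d. A standalone sketch of the two conventions (shapes are illustrative only):

```python
import tensorflow as tf

x = tf.random.normal((1, 8, 8, 4))        # NHWC input, 4 channels

# tf.nn.conv2d filter layout: [h, w, in_channels, out_channels]
w = tf.random.normal((3, 3, 4, 16))
y = tf.nn.conv2d(x, w, strides=(1, 1), padding='SAME')

# tf.nn.conv2d_transpose layout: [h, w, out_channels, in_channels]
w_t = tf.random.normal((3, 3, 16, 4))
y_t = tf.nn.conv2d_transpose(x, w_t, output_shape=(1, 8, 8, 16),
                             strides=(1, 1), padding='SAME')

print(y.shape, y_t.shape)                 # both (1, 8, 8, 16)
```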
[' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, 
stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = 
act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 
'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn = connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 
'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
[]
Variable 'input_shape' used at line 161 is defined at line 160 and has a Short-Range dependency.
Variable 'self' used at line 162 is defined at line 160 and has a Short-Range dependency.
Variable 'n_channels' used at line 162 is defined at line 161 and has a Short-Range dependency.
Variable 'kernel_size' used at line 162 is defined at line 154 and has a Short-Range dependency.
Variable 'self' used at line 163 is defined at line 160 and has a Short-Range dependency.
Library 'tf' used at line 163 is imported at line 16 and has a Long-Range dependency.
Variable 'fan_in' used at line 163 is defined at line 162 and has a Short-Range dependency.
Variable 'gain' used at line 163 is defined at line 158 and has a Short-Range dependency.
Variable 'self' used at line 165 is defined at line 160 and has a Short-Range dependency.
Variable 'self' used at line 167 is defined at line 160 and has a Short-Range dependency.
Variable 'kernel_size' used at line 167 is defined at line 154 and has a Medium-Range dependency.
Variable 'self' used at line 168 is defined at line 160 and has a Short-Range dependency.
Variable 'filters' used at line 168 is defined at line 153 and has a Medium-Range dependency.
Variable 'n_channels' used at line 169 is defined at line 161 and has a Short-Range dependency.
Variable 'self' used at line 170 is defined at line 160 and has a Short-Range dependency.
Variable 'kernel_initializer' used at line 170 is defined at line 156 and has a Medium-Range dependency.
Library 'tf' used at line 172 is imported at line 16 and has a Long-Range dependency.
Variable 'self' used at line 174 is defined at line 160 and has a Medium-Range dependency.
Variable 'self' used at line 176 is defined at line 160 and has a Medium-Range dependency.
Variable 'filters' used at line 176 is defined at line 153 and has a Medium-Range dependency.
Variable 'self' used at line 177 is defined at line 160 and has a Medium-Range dependency.
Variable 'bias_initializer' used at line 177 is defined at line 157 and has a Medium-Range dependency.
Library 'tf' used at line 179 is imported at line 16 and has a Long-Range dependency.
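Each statement above classifies a use/def pair by the line distance between use site and definition. The record never states the cutoffs; a minimal sketch with hypothetical thresholds (Short up to 10 lines, Medium up to 30, Long beyond) that happens to reproduce the labels above, the cutoffs being my assumption, not taken from the source:

def dependency_range(use_line, def_line, short_max=10, medium_max=30):
    # Hypothetical cutoffs chosen to match the labels above, e.g.
    # 162-154=8 -> Short-Range, 167-154=13 -> Medium-Range, and a
    # library imported ~150 lines earlier -> Long-Range.
    distance = use_line - def_line
    if distance <= short_max:
        return 'Short-Range'
    if distance <= medium_max:
        return 'Medium-Range'
    return 'Long-Range'

print(dependency_range(162, 154))  # Short-Range
print(dependency_range(167, 154))  # Medium-Range
print(dependency_range(163, 16))   # Long-Range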
{}
{'Variable Short-Range': 12, 'Library Long-Range': 3, 'Variable Medium-Range': 8}
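The frequency dict on the line above can be derived mechanically from the dependency statements. A minimal sketch, assuming every statement names either a Variable or a Library and ends with '<Kind>-Range dependency.'; the abbreviated two-statement sample is illustrative only:

import re
from collections import Counter

text = ("Variable 'input_shape' used at line 161 is defined at line 160 "
        "and has a Short-Range dependency. "
        "Library 'tf' used at line 163 is imported at line 16 "
        "and has a Long-Range dependency.")  # abbreviated sample

# Pair each statement's leading kind (Variable/Library) with its range label.
pairs = re.findall(r"(Variable|Library) '[^']+' used at line \d+ .*? "
                   r"(Short|Medium|Long)-Range dependency\.", text)
freq = Counter(f"{kind} {rng}-Range" for kind, rng in pairs)
print(dict(freq))  # {'Variable Short-Range': 1, 'Library Long-Range': 1}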
infilling_python
GAN_model
162
170
['import sys, os', 'from pathlib import Path', '# sys.path.append(os.path.dirname(os.path.abspath(__file__)))', '', 'import sys, os', 'import glob', '# import imageio', 'import matplotlib.pyplot as plt', 'import numpy as np', 'import os', 'import PIL', 'from tensorflow.keras import layers', 'import time', 'from tensorflow.keras import Model', 'import tensorflow_addons as tfa', 'import tensorflow as tf', 'import matplotlib', 'from PIL import Image', '', '###### components ######', '#helper functions do not change', 'kernel_init = tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.)', '', 'def swish(x):', ' return tf.keras.activations.swish(x)', '', 'def relu(x):', ' return tf.keras.activations.relu(x)', '', 'def leakyrelu(x):', ' return tf.keras.layers.LeakyReLU(.2)(x)', '', 'def insnorm(x):', ' return tfa.layers.InstanceNormalization(axis=-1)(x)', '', 'def pixelnorm(x):', ' return x/tf.math.sqrt(tf.reduce_mean(x**2+(10e-8), axis = 3, keepdims=True))', '', 'def batchnorm(x):', ' return layers.BatchNormalization(axis=-1)(x)', '', '#minibatch standard deviation as dscribed in Progressive GAN Tero Karras et al. https://github.com/tkarras/progressive_growing_of_gans', 'def minibatch_stddev_layer(x, group_size=4):', ' # Minibatch must be divisible by (or smaller than) group_size.', ' group_size = tf.minimum(group_size, tf.shape(x)[0]) ', ' # [NCHW] Input shape. ', ' s = x.shape', ' # [GMCHW] Split minibatch into M groups of size G. ', ' y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) ', ' # [GMCHW] Cast to FP32.', ' y = tf.cast(y, tf.float32) ', ' # [GMCHW] Subtract mean over group. ', ' y -= tf.reduce_mean(y, axis=0, keepdims=True) ', ' # [MCHW] Calc variance over group. ', ' y = tf.reduce_mean(tf.square(y), axis=0) ', ' # [MCHW] Calc stddev over group. ', ' y = tf.sqrt(y + 1e-8) ', ' # [M111] Take average over fmaps and pixels. ', ' y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) ', ' # [M111] Cast back to original data type. ', ' y = tf.cast(y, x.dtype) ', ' # [N1HW] Replicate over group and pixels. 
', ' y = tf.tile(y, [group_size, s[1], s[2], 1]) ', ' return tf.concat([x, y], axis=3)', '', 'def upsample_d(x, factor=2):', ' return layers.UpSampling2D(', " size=(factor, factor), interpolation='nearest'", ' )(x)', '', 'def upsample(x, filters, kernel_size=(3, 3), padding="same", factor=2):', ' return layers.Conv2DTranspose(filters, kernel_size,', ' strides=(factor, factor), padding=padding)(x)', '', 'def avgpooling2D(x,factor=2):', " return layers.AveragePooling2D(pool_size=(2, 2),strides=(factor, factor), padding='same')(x)", '', '', '#Build custom tensorflow layers for equalised 2d convolution, equalised 2d transpose convolution and equalised dense layer', '#scale the weights dynamically by the sqrt(gain/(kernalh*kernelw*numberchannels))', '#apply this scaling as an input into convolution/dense function, do not scale the bias', '#make sure to set the kernel initilization seed to 42 so results are consistent', '', 'class EqualizedConv2D(tf.keras.layers.Layer):', ' def __init__(self, ', ' filters, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42), ', ' bias_initializer=tf.initializers.Zeros(), ', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2D, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape', ' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' n_channels,', ' self.filters),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs, training=None):', " x = tf.nn.conv2d(inputs, filters=self.scale*self.w, strides=self.strides, padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedConv2DTranspose(tf.keras.layers.Layer):', ' def __init__(self,', ' filters,', ' kernel_size=(3,3),', ' strides=(1,1), ', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedConv2DTranspose, self).__init__(**kwargs)', ' self.filters = filters', ' self.kernel_size = kernel_size', ' self.strides = strides', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', '', ' def build(self, input_shape):', ' *_, n_channels = input_shape']
[' fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels', ' self.scale = tf.math.sqrt(self.gain/fan_in)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(*self.kernel_size,', ' self.filters,', ' n_channels),', ' initializer=self.kernel_initializer,']
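The ground-truth span above (the `between` field) is the heart of the equalized learning-rate trick: compute fan_in = kh*kw*channels, keep the kernel initialized from a plain unit normal, and apply the constant sqrt(gain/fan_in) at every forward pass instead of baking it into the initializer. A minimal standalone sketch of that scale computation, using NumPy in place of TensorFlow:

import numpy as np

def equalized_scale(kernel_size, n_channels, gain=2.0):
    # He-style constant: weights stay N(0, 1) at init and are
    # multiplied by this scale on every call instead.
    fan_in = kernel_size[0] * kernel_size[1] * n_channels
    return np.sqrt(gain / fan_in)

rng = np.random.default_rng(42)
w = rng.standard_normal((3, 3, 512, 512))   # unscaled kernel, N(0, 1)
w_eff = equalized_scale((3, 3), 512) * w    # kernel actually applied
print(equalized_scale((3, 3), 512))         # ~0.0208 for a 3x3x512 kernel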
[' trainable=True,', ' dtype=tf.float32)', '', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.filters,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "filters": self.filters,', ' "kernel_size": self.kernel_size,', ' "strides": self.strides,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', '', ' def call(self, inputs, training=None):', ' x = tf.nn.conv2d_transpose(inputs, filters=self.scale*self.w,', ' output_shape = (inputs.shape[1]*self.strides[0],inputs.shape[2]*self.strides[1]),', ' strides=self.strides,', " padding = 'SAME')", '', ' x = x + self.b', '', ' return x', '', 'class EqualizedDense(layers.Layer):', ' def __init__(self,', ' units=1,', ' kernel_initializer=tf.initializers.RandomNormal(seed=42),', ' bias_initializer=tf.initializers.Zeros(),', ' gain=2,', ' **kwargs):', ' ', ' super(EqualizedDense, self).__init__(**kwargs)', '', ' self.units = units', ' self.kernel_initializer = kernel_initializer', ' self.bias_initializer = bias_initializer', ' self.gain = gain', ' ', ' ', ' def build(self, input_shape):', ' ', ' *_, n_channels = input_shape', ' ', ' self.scale = tf.math.sqrt(self.gain/n_channels)', ' ', ' self.w = self.add_weight(', " name='kernel',", ' shape=(n_channels,', ' self.units),', ' initializer=self.kernel_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' self.b = self.add_weight(', " name='bias',", ' shape=(self.units,),', ' initializer=self.bias_initializer,', ' trainable=True,', ' dtype=tf.float32)', ' ', ' def get_config(self):', ' config = super().get_config()', ' config.update({', ' "units": self.units,', ' "kernel_initializer": self.kernel_initializer,', ' "bias_initializer": self.bias_initializer,', ' "gain": self.gain,', ' })', ' return config', ' ', ' def call(self, inputs):', ' return tf.matmul(inputs,self.scale*self.w) + self.b', '', '#### # Test the custom layers', 'tf.random.set_seed(42)', 'input_shape = (1, 8, 8, 3)', 'x = tf.random.normal(input_shape)', '', 'equalized_conv2d = EqualizedConv2D(filters=16, kernel_size=(3, 3))', 'output_equalized_conv2d = equalized_conv2d(x)', "print('conv2d_equal',output_equalized_conv2d.shape)", "print('conv2d_equal',output_equalized_conv2d)", '', 'equalized_transposeconv2d = EqualizedConv2DTranspose(filters=16, kernel_size=(3, 3))', 'output_equalized_transposeconv2d = equalized_transposeconv2d(x)', "print('transconv2d_equal',output_equalized_transposeconv2d.shape)", "print('transconv2d_equal',output_equalized_transposeconv2d)", '', 'equalized_dense = EqualizedDense()', 'output_equalized_dense = equalized_dense(x)', "print('dense_equal',output_equalized_dense.shape)", "print('dense_equal',output_equalized_dense)", '', ' ', '###### components ######', '', '###### U-gen ######', '# U generator initial block', 'def U_gen_bottom_init(', ' act_func,', ' norm_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (1,1,filters))', ' x = EqualizedConv2DTranspose(filters,', ' kernel_size=(4,4),', ' strides=(4,4),', ' kernel_initializer=kernel_init)(inputs)', ' ', ' x = act_func((x))', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' x = norm_func(act_func(x))', ' ', ' model = tf.keras.models.Model(inputs, [x,x])', ' return model', '', '#instantiate the U gen bottom initial 
block and print layer shapes', 'U_gen_bottom_init_layer_shape_lst = []', 'U_gen_bottom_init_fn = U_gen_bottom_init(leakyrelu, pixelnorm, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_init_fn.layers:', ' U_gen_bottom_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_init = tf.random.normal((1,1, 1, 512))', 'output_U_gen_bottom_init = U_gen_bottom_init_fn(x_U_gen_bottom_init)', 'print(output_U_gen_bottom_init[0])', 'print(output_U_gen_bottom_init[0].shape)', '', 'def U_gen_bottom_add(', ' act_func,', ' norm_func,', ' upsample_func,', ' input_shape,', ' filters,', ' init_filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (input_shape[0], input_shape[1], init_filters))', ' ', ' upsample = upsample_func(inputs)', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)))', ' ', ' x = norm_func(act_func(EqualizedConv2D(filters,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)))', ' ', ' model = tf.keras.models.Model(inputs, [x,upsample])', ' return model ', '', '#instantiate the U gen bottom additional blocks and print layer shapes', 'U_gen_bottom_add_fn_layer_shape_lst = []', 'U_gen_bottom_add_fn = U_gen_bottom_add(leakyrelu, pixelnorm,upsample_d, (4,4,512), 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_bottom_add_fn.layers:', ' U_gen_bottom_add_fn_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_bottom_add_fn_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_bottom_add = tf.random.normal((1,4, 4, 512))', 'U_gen_bottom_add_output = U_gen_bottom_add_fn(x_U_gen_bottom_add)', 'print(U_gen_bottom_add_output[0])', 'print(U_gen_bottom_add_output[1])', 'print(U_gen_bottom_add_output[0].shape)', 'print(U_gen_bottom_add_output[1].shape) ', '', 'def U_gen_top_init(', ' act_func,', ' filters=512,', ' kernel_init=kernel_init', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the U gen top initial blocks and print layer shapes', 'U_gen_top_init_layer_shape_lst = []', 'U_gen_top_init_fn = U_gen_top_init(leakyrelu, 512, kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_init_fn.layers:', ' U_gen_top_init_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_init_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_U_gen_top_init = tf.random.normal((1,4, 4, 512))', 'U_gen_top_init_output = U_gen_top_init_fn(x_U_gen_top_init)', 'print(U_gen_top_init_output[0])', 'print(U_gen_top_init_output[0].shape)', '', 'def U_gen_top_add(', ' 
act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#instantiate the U gen top additional blocks and print layer shapes', 'U_gen_top_add_layer_shape_lst = []', 'U_gen_top_add_fn = U_gen_top_add(leakyrelu, avgpooling2D, 512,512,(8,8,512), kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in U_gen_top_add_fn.layers:', ' U_gen_top_add_layer_shape_lst.append(layer.output_shape)', 'print(U_gen_top_add_layer_shape_lst)', '', 'x_U_gen_top_add = tf.random.normal((1,8, 8, 512))', 'output_U_gen_top_add = U_gen_top_add_fn(x_U_gen_top_add)', 'print(output_U_gen_top_add[0])', 'print(output_U_gen_top_add[0].shape)', '', '', 'def U_connect(top, bottom, center=None, input_shape=(4,4), filters=512):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filters)) ', ' ', ' if center == None: ', ' x = top(inputs)', ' x = bottom(x)', '', ' else:', ' h = top(inputs)', ' x, _ = center(h)', ' x = x+h', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '#unit test U_connect', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'u_conn_list_shape_lst=[]', 'U_connect_fn = U_connect(top_G, bottom_G, center_G, (4,4), 512)', 'for layer in U_connect_fn.layers:', ' u_conn_list_shape_lst.append(layer.output_shape)', 'print(u_conn_list_shape_lst)', '', 'x_U_conn = tf.random.normal((1,4, 4, 512))', 'output_U_Conn = U_connect_fn(x_U_conn)', 'print(output_U_Conn[0])', 'print(output_U_Conn[0].shape)', '', 'def U_rgb_fadein(top,', ' center,', ' bottom,', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not center == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' w, _ = center(fade_in)', ' x = x+w', '', ' x,upsample = bottom(x)', ' ', ' if x.shape[1] == 4 and upsample.shape[1] == 4:', ' fade_in = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh(fade_in)', ' else:', ' upsample = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(upsample)', ' ', ' x = EqualizedConv2D(3, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' ', ' fade_in = tf.math.tanh((1-alpha)*upsample+alpha*x)', ' ', ' model = tf.keras.models.Model([inputs,alpha], fade_in)', '', ' return model ', '', 'top_G = U_gen_top_init_fn', 'bottom_G = U_gen_bottom_init_fn', 'center_G = None', '', 'to_rgb_fadein_list_shape_lst=[]', 'U_rgb_fadein_fn = U_rgb_fadein(top_G, center_G, bottom_G,relu, 
avgpooling2D, (4,4), 512,512,kernel_init=tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in U_rgb_fadein_fn.layers:', ' to_rgb_fadein_list_shape_lst.append(layer.output_shape)', 'print(to_rgb_fadein_list_shape_lst)', '', 'x_to_rgb_fadein = tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([[1.0]])', 'output_U_rgb_fadein = U_rgb_fadein_fn([x_to_rgb_fadein, alpha])', 'print(output_U_rgb_fadein)', 'print(output_U_rgb_fadein.shape)', '', '###### disc ######', '', 'def final_block_disc(', ' act_func,', ' kernel_init=kernel_init,', ' filters=512', '):', ' inputs = layers.Input(shape = (4,4,filters))', ' ', ' x = minibatch_stddev_layer(inputs, group_size=4)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(x)', ' x = act_func(x)', ' ', ' x = EqualizedConv2D(filters, ', ' kernel_size=(4,4), ', ' strides=(4,4),', ' kernel_initializer=kernel_init)(x)', ' ', ' ', ' x = act_func(x)', ' x = EqualizedDense(1)(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the final disc blocks and print layer shapes', 'final_block_disc_layer_shape_lst = []', 'final_block_disc_fn = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'for layer in final_block_disc_fn.layers:', ' final_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(final_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_final_block_disc = tf.random.normal((1,4, 4, 512))', 'final_block_disc_output = final_block_disc_fn(x_final_block_disc)', 'print(final_block_disc_output)', 'print(final_block_disc_output.shape)', '', '', 'def additional_block_disc(', ' act_func,', ' downsample_func,', ' filters1,', ' filters2, ', ' image_shape,', ' kernel_init=kernel_init,', '):', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1],filters1))', ' ', ' x = act_func(EqualizedConv2D(filters1,', ' kernel_size=(3,3), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(3,3), ', ' strides=(1,1), ', ' kernel_initializer=kernel_init)(x))', ' x = downsample_func(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', '#instantiate the additional discriminator blocks and print layer shapes', 'additional_block_disc_layer_shape_lst = []', 'additional_block_disc_fn = additional_block_disc(leakyrelu, avgpooling2D,512,512,(8,8),tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42)) ', 'for layer in additional_block_disc_fn.layers:', ' additional_block_disc_layer_shape_lst.append(layer.output_shape)', 'print(additional_block_disc_layer_shape_lst)', '', '#define random tensor set see so consistent and feed through the previous instantiated block', '#print output tensor and shape', 'x_additional_block_disc = tf.random.normal((1,8, 8, 512))', 'additional_block_disc_output = additional_block_disc_fn(x_additional_block_disc)', 'print(additional_block_disc_output)', 'print(additional_block_disc_output.shape)', '', 'def connect_model(top, bottom, input_shape, filter2):', ' ', ' inputs = layers.Input(shape = (input_shape[0],input_shape[1],filter2))', ' ', ' x = top(inputs)', ' x = bottom(x)', ' ', ' model = tf.keras.models.Model(inputs, x)', ' return model', '', 'bottom = final_block_disc_fn', 'top = additional_block_disc_fn', 'conn_list_shape_lst=[]', 'connect_fn 
= connect_model(top, bottom, (8,8), 512)', 'for layer in connect_fn.layers:', ' conn_list_shape_lst.append(layer.output_shape)', 'print(conn_list_shape_lst)', '', 'x_conn = tf.random.normal((1,8, 8, 512))', 'output_conn = connect_fn(x_conn)', 'print(output_conn)', 'print(output_conn.shape)', '', '', 'def from_rgb_fadein(top, bottom, ', ' act_func,', ' downsample_func,', ' image_shape,', ' filters1,', ' filters2,', ' kernel_init=kernel_init,', ' ):', ' ', ' inputs = layers.Input(shape = (image_shape[0],image_shape[1], 3))', ' alpha = layers.Input(shape = (1,))', ' ', ' x = act_func(EqualizedConv2D(filters1, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(inputs))', ' x = top(x)', ' if not bottom == None:', ' h = downsample_func(inputs)', ' ', ' h = act_func(EqualizedConv2D(filters2, ', ' kernel_size=(1,1), ', ' strides=(1,1),', ' kernel_initializer=kernel_init)(h))', '', ' fade_in = (1-alpha)*h+alpha*x', ' # after from rgb', ' x = bottom(fade_in)', ' ', ' model = tf.keras.models.Model([inputs, alpha], x)', ' return model ', '', 'top = final_block_disc(leakyrelu, tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42),512) ', 'bottom = None', '', 'from_rgb_fadein_shape_lst=[]', 'from_rgb_fadein_fn = from_rgb_fadein(top, bottom, leakyrelu,avgpooling2D,(4,4), 512,512,tf.keras.initializers.RandomNormal(mean=0.0, stddev=1.0,seed=42))', 'for layer in from_rgb_fadein_fn.layers:', ' from_rgb_fadein_shape_lst.append(layer.output_shape)', 'print(from_rgb_fadein_shape_lst)', '', 'tf.random.set_seed(42)', 'x_from_rgb_fadein= tf.random.normal((1,4, 4, 3))', 'alpha = tf.constant([1.0])', 'output_from_rgb_fadein = from_rgb_fadein_fn([x_from_rgb_fadein,alpha])', 'print(output_from_rgb_fadein)', 'print(output_from_rgb_fadein.shape)']
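For this record the masked span runs from line 162 to line 170 inclusive, nine source lines, matching the nine entries in the `between` list above. A minimal round-trip sketch, assuming the record's `before`, `between`, and `after` fields are plain lists of source lines that concatenate back to the full script and that line numbers are 1-indexed; the toy lists below are hypothetical, not the record's own:

def reassemble(before, between, after):
    # Full source is the three segments in order; `between` is the
    # span a model would be asked to infill.
    return before + between + after

before = ['def build(self, input_shape):', '    *_, n_channels = input_shape']
between = ['    fan_in = self.kernel_size[0]*self.kernel_size[1]*n_channels']
after = ['    self.scale = tf.math.sqrt(self.gain/fan_in)']

lines = reassemble(before, between, after)
start_line = len(before) + 1              # 1-indexed first line of the span
end_line = start_line + len(between) - 1  # inclusive last line of the span
print(start_line, end_line, len(lines))   # 3 3 4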
[]
Variable 'self' used at line 162 is defined at line 160 and has a Short-Range dependency.
Variable 'n_channels' used at line 162 is defined at line 161 and has a Short-Range dependency.
Variable 'kernel_size' used at line 162 is defined at line 154 and has a Short-Range dependency.
Variable 'self' used at line 163 is defined at line 160 and has a Short-Range dependency.
Library 'tf' used at line 163 is imported at line 16 and has a Long-Range dependency.
Variable 'fan_in' used at line 163 is defined at line 162 and has a Short-Range dependency.
Variable 'gain' used at line 163 is defined at line 158 and has a Short-Range dependency.
Variable 'self' used at line 165 is defined at line 160 and has a Short-Range dependency.
Variable 'self' used at line 167 is defined at line 160 and has a Short-Range dependency.
Variable 'kernel_size' used at line 167 is defined at line 154 and has a Medium-Range dependency.
Variable 'self' used at line 168 is defined at line 160 and has a Short-Range dependency.
Variable 'filters' used at line 168 is defined at line 153 and has a Medium-Range dependency.
Variable 'n_channels' used at line 169 is defined at line 161 and has a Short-Range dependency.
Variable 'self' used at line 170 is defined at line 160 and has a Short-Range dependency.
Variable 'kernel_initializer' used at line 170 is defined at line 156 and has a Medium-Range dependency.
{}
{'Variable Short-Range': 11, 'Library Long-Range': 1, 'Variable Medium-Range': 3}